Dataset columns:
    query            string    length 9 to 3.4k
    document         string    length 9 to 87.4k
    metadata         dict
    negatives        sequence  length 4 to 101
    negative_scores  sequence  length 4 to 101
    document_score   string    length 3 to 10
    document_rank    string    102 distinct values
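Each record below pairs a natural-language query with the code snippet it documents, a list of hard-negative snippets, and per-negative similarity scores; the metadata field marks the record for triplet-style training over (query, document, negatives). As a minimal sketch of how such rows could be unpacked, assuming plain dicts shaped like the records below (make_triplets and example_row are illustrative names, not part of the dataset):

def make_triplets(rows):
    """Yield (query, positive_document, negative_document) training triplets."""
    for row in rows:
        query = row["query"]               # short docstring-style query
        positive = row["document"]         # the code snippet that matches it
        for negative in row["negatives"]:  # hard negatives, scored in negative_scores
            yield query, positive, negative

# Hand-written example row mirroring the schema above (values abridged):
example_row = {
    "query": "See ticket 2107 for the bug that this tickles.",
    "document": "def test_do_manga_dither_after_sequence(self): ...",
    "negatives": ["def tick(self):", "def tick(self):\n    pass"],
}
for triplet in make_triplets([example_row]):
    print(triplet)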
See ticket 2107 for the bug that this tickles.
def test_do_manga_dither_after_sequence(self):
    sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])
    dither = 'N'
    cmdState = self.actorState.doMangaSequence
    cmdState.reinitialize(self.cmd)
    cmdState.count = 1
    cmdState.dithers = 'NSE'
    cmdState.reset_ditherSeq()
    self.cmd.verbose = False
    masterThread.do_apogeemanga_sequence(self.cmd, cmdState, myGlobals.actorState)
    self.cmd.reset()
    self.cmd.verbose = self.verbose
    self._do_manga_dither(4, 28, 0, 0, dither=dither)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tick(self):", "def tick(self):\r\n pass", "def tick(self):\n pass", "def tick(self):\n pass", "def tick_descent(self):\n pass", "def lastTick():", "def tick(self, tick):\n pass", "def tick(self, tick):\n pass", "def tick_skipped(self):\n pass", "def tick(self):\n return True", "def process_tick(self, tick):\n pass", "def dummy_update( self ):\r\n pass", "def update_tick(self, tick: InstigatorTick):", "def update_job_tick(self, tick):", "def dummy(self):\n pass", "def test_issue_tracked_times(self):\n pass", "def solvate(self):\n\n pass", "def get_tick():\n return _tick", "def before_tick(self, time):\n pass", "def cool(self):\n self.t = self.t - 1", "def task4_1(self):\n\n pass", "def b(self):\n pass", "def b(self):\n pass", "def dead_end_value(self):\n pass", "def think(self):\n pass", "def make_uncurrent(self):\n pass", "def tick(self, dt):\n pass", "def _dummy(ticket):\r\n return True", "def c(self):\n pass", "def c(self):\n pass", "def tick(self):\n self.count += 1", "def onJT808Operation(self):\n pass", "def first_tick(self, time):\n pass", "def on_tick(self, time):\n pass", "def ticker_wrapper(ticker):", "def wrapup(self):\n pass", "def create_job_tick(self, job_tick_data):", "def swint(self) -> None:", "def exercise_b2_106():\r\n pass", "def test_full_house_flush_ind(self):", "def game_tick_run(self):\n pass", "def x(self):\n pass", "def slot_tick(self, gox, (bid, ask)):\r\n pass", "def _lazy_axis(self):\n raise NotImplementedError", "def delta(self) -> None:", "def Update(self, ticks=0):", "def tick():\n global current\n current += 1", "def tick():\n global current\n current += 1", "def task4(self):\n\n pass", "def exercise_b2_107():\r\n pass", "def create_tick(self, tick_data: TickData):", "def test_quad_flush_ind(self):", "def exercise_b2_53():\r\n pass", "def y(self):\n pass", "def after_tick(self, time):\n pass", "def _tick(self):\n\t\tself.pay_tax()\n\t\tself.inhabitant_check()\n\t\tself.level_check()", "def x_lb(self):\n pass", "def test_op_no_ticket(self):\n assert OP_NO_TICKET == 0x4000", "def high(self):", "def realtime(self):", "def task5(self):\n\n pass", "def on_tick(self, tick: TickData):\n if tick and tick.bid_price_1 > 0:\n self.tick = tick", "def busy(self):\n pass", "def CL(self):", "def tick(self):\n\n if self.seconds != 59:\n self.seconds += 1\n else:\n self.seconds = 0\n\n if self.minutes != 59:\n self.minutes += 1\n else:\n self.minutes = 0\n\n if self.hours != 23:\n self.hours += 1\n else:\n self.hours = 0", "def test_update9(self):\n pass", "def task3(self):\n\n pass", "def takeoff(self, n, e, d):\n pass", "def evals(self):\n\t\tpass", "def _prey_step(self):\n raise NotImplementedError()", "def exercise_b2_82():\r\n pass", "def MINET(self):", "def tick():\n global counter\n counter += 1", "def exercise_b2_27():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_93():\r\n pass", "def __int__(self):\n pass", "def __periodic_maintenance__(self):\n pass", "def exercise_b2_113():\r\n pass", "def g(self):\n return 2", "def _run_cycle(self):\n pass", "def exercise_b2_98():\r\n pass", "def exercise_b2_69():\r\n pass", "def naked_singles(self):\n self.change = True\n while self.change:\n self.naked_round()", "def test_issue_add_time(self):\n pass", "def last_tick_time(self):\n return self.last_tick_", "def intuit(self):\n raise NotImplemented()", "def beforeUpdate(self):", "def test_4_4_1_1(self):\n pass", "def ctime(self): # real signature unknown; restored from __doc__\r\n pass", "def testBucketDrain(self):\n b = SomeBucket()\n fit = b.add(1000)\n 
self.clock.set(10)\n fit = b.add(1000)\n self.assertEqual(20, fit)", "def test_e1_get_tick_changes(self):\n config.NR_ROWS = 5\n config.NR_COLS = 5\n blinker = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n tick_changes = logic.get_tick_changes(blinker)\n\n self.assertEqual(tick_changes, [\n (1, 2, 4),\n (2, 1, 1),\n (2, 3, 1),\n (3, 2, 4),\n ])", "def x_ub(self):\n pass", "def test_update_state4(self):\n pass", "def __call__(self) -> None:", "async def _expand_ticks(ticks: List[Dict[str, float]]):\n\n tick = ticks[0]\n close_times = array('d')\n close_values = array('d')\n last_time = tick['T']\n last_value = tick['C']\n close_times.append(last_time)\n close_values.append(last_value)\n\n for tick in ticks[1:]:\n close_time = tick['T']\n\n while int(close_time - last_time) > config['tick_interval_secs']:\n last_time += config['tick_interval_secs']\n close_times.append(last_time)\n close_values.append(last_value)\n\n last_time = close_time\n last_value = tick['C']\n close_times.append(last_time)\n close_values.append(last_value)\n\n return (close_times, close_values)", "def jumped_on(self):\r\n pass", "def exercise_b2_26():\r\n pass", "def untargeted(self):\n\t\tpass", "def update(self, ticks):\n pass", "def event_bc(self):\n self.evt_bc = True\n self.reschedule()" ]
[ "0.68404317", "0.67841446", "0.67153627", "0.67153627", "0.6649946", "0.6539368", "0.6539086", "0.6539086", "0.6154324", "0.60770965", "0.6065959", "0.6026124", "0.5937391", "0.58856446", "0.580046", "0.5741288", "0.5730749", "0.57282233", "0.5695636", "0.56728476", "0.5643949", "0.5634358", "0.5634358", "0.5582232", "0.5579612", "0.5542619", "0.55374914", "0.5491304", "0.54846424", "0.54846424", "0.54679227", "0.5462092", "0.5446829", "0.5445301", "0.5427396", "0.5414503", "0.54103726", "0.5404217", "0.53672624", "0.535537", "0.53547704", "0.53534955", "0.53494716", "0.53469723", "0.53419673", "0.5340354", "0.5328896", "0.5328896", "0.53201777", "0.53051686", "0.52662146", "0.52630216", "0.524173", "0.52352417", "0.5231466", "0.51974607", "0.5191877", "0.5185654", "0.51851064", "0.5183554", "0.51684415", "0.51681983", "0.51664203", "0.51592875", "0.5156849", "0.51457375", "0.5144734", "0.5142657", "0.5137873", "0.5118744", "0.5111726", "0.51078033", "0.5107739", "0.51063377", "0.5101025", "0.5097837", "0.50943613", "0.50902134", "0.5080983", "0.5069678", "0.50675607", "0.50661266", "0.5062386", "0.50564784", "0.5055748", "0.5050059", "0.50471425", "0.5033682", "0.5031243", "0.5026203", "0.50233245", "0.5021944", "0.50211126", "0.5018448", "0.50143266", "0.5012614", "0.50123143", "0.5010839", "0.5008562", "0.5005293", "0.5004073" ]
0.0
-1
See ticket 2107 for the bug that this tickles.
def test_do_apogeemanga_dither_after_sequence(self):
    sopTester.updateModel('mcp', TestHelper.mcpState['apogee_science'])
    sopTester.updateModel('apogee', TestHelper.apogeeState['B_open'])
    sopTester.updateModel('platedb', TestHelper.platedbState['apgoeemangaDither'])
    self._update_cart(1, 'APOGEE&MaNGA', 'MaNGA dither')
    mangaDither = 'N'
    cmdState = self.actorState.doApogeeMangaSequence
    cmdState.reinitialize(self.cmd)
    cmdState.count = 1
    cmdState.mangaDithers = 'NSE'
    cmdState.reset_ditherSeq()
    self.cmd.verbose = False
    masterThread.do_apogeemanga_sequence(self.cmd, cmdState, myGlobals.actorState)
    self.cmd.reset()
    self.cmd.verbose = self.verbose
    sopTester.updateModel('apogee', TestHelper.apogeeState['B_open'])
    self._do_apogeemanga_dither(7, 37, 0, 0, mangaDither)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tick(self):", "def tick(self):\r\n pass", "def tick(self):\n pass", "def tick(self):\n pass", "def tick_descent(self):\n pass", "def tick(self, tick):\n pass", "def tick(self, tick):\n pass", "def lastTick():", "def tick_skipped(self):\n pass", "def tick(self):\n return True", "def process_tick(self, tick):\n pass", "def dummy_update( self ):\r\n pass", "def update_tick(self, tick: InstigatorTick):", "def update_job_tick(self, tick):", "def dummy(self):\n pass", "def test_issue_tracked_times(self):\n pass", "def get_tick():\n return _tick", "def solvate(self):\n\n pass", "def before_tick(self, time):\n pass", "def cool(self):\n self.t = self.t - 1", "def task4_1(self):\n\n pass", "def b(self):\n pass", "def b(self):\n pass", "def dead_end_value(self):\n pass", "def think(self):\n pass", "def make_uncurrent(self):\n pass", "def tick(self, dt):\n pass", "def _dummy(ticket):\r\n return True", "def c(self):\n pass", "def c(self):\n pass", "def tick(self):\n self.count += 1", "def onJT808Operation(self):\n pass", "def first_tick(self, time):\n pass", "def on_tick(self, time):\n pass", "def ticker_wrapper(ticker):", "def wrapup(self):\n pass", "def create_job_tick(self, job_tick_data):", "def swint(self) -> None:", "def exercise_b2_106():\r\n pass", "def game_tick_run(self):\n pass", "def test_full_house_flush_ind(self):", "def x(self):\n pass", "def slot_tick(self, gox, (bid, ask)):\r\n pass", "def _lazy_axis(self):\n raise NotImplementedError", "def delta(self) -> None:", "def Update(self, ticks=0):", "def tick():\n global current\n current += 1", "def tick():\n global current\n current += 1", "def task4(self):\n\n pass", "def exercise_b2_107():\r\n pass", "def create_tick(self, tick_data: TickData):", "def test_quad_flush_ind(self):", "def exercise_b2_53():\r\n pass", "def y(self):\n pass", "def after_tick(self, time):\n pass", "def _tick(self):\n\t\tself.pay_tax()\n\t\tself.inhabitant_check()\n\t\tself.level_check()", "def x_lb(self):\n pass", "def test_op_no_ticket(self):\n assert OP_NO_TICKET == 0x4000", "def high(self):", "def realtime(self):", "def on_tick(self, tick: TickData):\n if tick and tick.bid_price_1 > 0:\n self.tick = tick", "def task5(self):\n\n pass", "def busy(self):\n pass", "def CL(self):", "def tick(self):\n\n if self.seconds != 59:\n self.seconds += 1\n else:\n self.seconds = 0\n\n if self.minutes != 59:\n self.minutes += 1\n else:\n self.minutes = 0\n\n if self.hours != 23:\n self.hours += 1\n else:\n self.hours = 0", "def test_update9(self):\n pass", "def task3(self):\n\n pass", "def takeoff(self, n, e, d):\n pass", "def evals(self):\n\t\tpass", "def _prey_step(self):\n raise NotImplementedError()", "def exercise_b2_82():\r\n pass", "def MINET(self):", "def tick():\n global counter\n counter += 1", "def exercise_b2_27():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_93():\r\n pass", "def __int__(self):\n pass", "def __periodic_maintenance__(self):\n pass", "def exercise_b2_113():\r\n pass", "def g(self):\n return 2", "def _run_cycle(self):\n pass", "def exercise_b2_98():\r\n pass", "def exercise_b2_69():\r\n pass", "def test_issue_add_time(self):\n pass", "def naked_singles(self):\n self.change = True\n while self.change:\n self.naked_round()", "def last_tick_time(self):\n return self.last_tick_", "def intuit(self):\n raise NotImplemented()", "def beforeUpdate(self):", "def test_4_4_1_1(self):\n pass", "def ctime(self): # real signature unknown; restored from __doc__\r\n pass", "def test_e1_get_tick_changes(self):\n config.NR_ROWS = 5\n 
config.NR_COLS = 5\n blinker = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n tick_changes = logic.get_tick_changes(blinker)\n\n self.assertEqual(tick_changes, [\n (1, 2, 4),\n (2, 1, 1),\n (2, 3, 1),\n (3, 2, 4),\n ])", "def testBucketDrain(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.clock.set(10)\n fit = b.add(1000)\n self.assertEqual(20, fit)", "def x_ub(self):\n pass", "def test_update_state4(self):\n pass", "def __call__(self) -> None:", "async def _expand_ticks(ticks: List[Dict[str, float]]):\n\n tick = ticks[0]\n close_times = array('d')\n close_values = array('d')\n last_time = tick['T']\n last_value = tick['C']\n close_times.append(last_time)\n close_values.append(last_value)\n\n for tick in ticks[1:]:\n close_time = tick['T']\n\n while int(close_time - last_time) > config['tick_interval_secs']:\n last_time += config['tick_interval_secs']\n close_times.append(last_time)\n close_values.append(last_value)\n\n last_time = close_time\n last_value = tick['C']\n close_times.append(last_time)\n close_values.append(last_value)\n\n return (close_times, close_values)", "def jumped_on(self):\r\n pass", "def exercise_b2_26():\r\n pass", "def untargeted(self):\n\t\tpass", "def update(self, ticks):\n pass", "def event_bc(self):\n self.evt_bc = True\n self.reschedule()" ]
[ "0.6841335", "0.67850286", "0.67163193", "0.67163193", "0.665169", "0.65407777", "0.65407777", "0.6539678", "0.61551094", "0.6077852", "0.6066676", "0.6026107", "0.5938928", "0.58871347", "0.57996696", "0.57409745", "0.57303774", "0.57297117", "0.5696213", "0.56716204", "0.5643665", "0.5634072", "0.5634072", "0.55815375", "0.55794895", "0.55415356", "0.55382663", "0.5490368", "0.54838467", "0.54838467", "0.5467319", "0.5463109", "0.5446394", "0.5445675", "0.54288256", "0.5414057", "0.5412321", "0.5403469", "0.53665435", "0.53549117", "0.5353854", "0.5353136", "0.53509444", "0.5347116", "0.5342322", "0.5341435", "0.53281826", "0.53281826", "0.5320362", "0.53042716", "0.5268897", "0.52609026", "0.5240716", "0.5236164", "0.5231797", "0.51979977", "0.5192481", "0.5186649", "0.51847744", "0.5183455", "0.5169378", "0.5168788", "0.5165473", "0.5158771", "0.5157108", "0.5145865", "0.51446587", "0.5142074", "0.51388484", "0.51190305", "0.51107216", "0.5108148", "0.51066864", "0.51056135", "0.5099727", "0.50971586", "0.50941885", "0.5090496", "0.5080101", "0.50695544", "0.5066877", "0.5065096", "0.5061316", "0.50557876", "0.5053705", "0.5051894", "0.50458944", "0.5034347", "0.50309837", "0.5025667", "0.50234085", "0.5022189", "0.5020956", "0.5018458", "0.50139284", "0.5013887", "0.50119746", "0.5009604", "0.5008027", "0.5006417", "0.5003345" ]
0.0
-1
For 371, not closing FFS for bias/dark.
def test_do_boss_calibs_one_bias_ffs_open(self):
    sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])
    cmdState = CmdState.DoBossCalibsCmd()
    cmdState.nBias = 1
    self._do_boss_calibs(4, 25, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close():\n\trfm.close()", "def close_trace(self):\n if self.State==1:\n self.tf.close()\n self.State = 0 \n else:\n print \"Tracefile not open...\"", "def safeClose():\n # outputToggle(ledPin, False)\n # outputToggle(auxlightPin, False)\n camera.stop_preview()\n camera.close()\n GPIO.cleanup()", "def close(self):\r\n if plt.fignum_exists(num=1):\r\n plt.close()", "def close(self):\n\n if self.fig:\n plt.close('all')\n plt.ioff()", "def close(self):\n\t\tself.applied = 0", "def cehs():\n\tcloseEHShutter()", "def close(f, b=None):\n\n if b is None: b = secross()\n y = erode(dilate(f,b),b)\n return y", "def close_datafile(fs):\r\n fs.close() # fs is the output from init_datafile\r", "def bfm_close ( cid=0\n , rigor=False\n , verbose=False ):\n global _cosim\n _bfm_close = WrapFunction( _cosim\n , 'bfm_close'\n , ctypes.c_int\n ,[ctypes.c_int])\n return _bfm_close( cid )", "def _close(self):\n # TODO\n self.holding = False", "def close():", "def _close(self):\n log.Debug('dpbx.close():')", "def stop(_):\n if \"fscad.fscad\" in sys.modules:\n del sys.modules[\"fscad.fscad\"]", "def shutdown_openafs(self):\n if get_var('AFS_DIST') == \"transarc\":\n uname = os.uname()[0]\n if uname == 'Linux':\n run_keyword(\"Stop the Cache Manager on Linux\")\n elif uname == 'SunOS':\n run_keyword(\"Stop the Cache Manager on Solaris\")\n else:\n raise AssertionError(\"Unsupported operating system: %s\" % (uname))\n run_keyword(\"Stop the bosserver\")\n else:\n run_keyword(\"Stop Service\", \"openafs-client\")\n run_keyword(\"Stop Service\", \"openafs-server\")", "def _barf_if_closed(self):\n if self.closed:\n raise ValueError(\"I/O operation on closed file\")", "def close():\n wfs.close()\n ax.close()", "def close(self):\n self.f.close()", "def close(self):\n self.f.close()", "def _Close(self):\n self._fsfat_volume = None\n self._file_object = None", "def _close_staf_handle(self):\n\n try:\n self._staf_handle.unregister()\n except STAFException, e:\n raise FatalError(\"Error unregistering with STAF, RC: {0}, \"\n \"Result: {1}\".format(e.rc, e.result))", "def close(self):\n self.pi.set_watchdog(self.gpio, 0)\n if self.either_edge_cb:\n self.either_edge_cb.cancel()\n self.either_edge_cb = None", "def Close(self):", "def go_infFD(self):\n\n response = self.send_lens_cmd(['05', '00', '00', '00'], fast_mode=True)\n self.wait_focus_move()", "def close(self, *args, **kwargs):\n if self.mode == 'a':\n if self.w_uid in self.wFp:\n self.wFp[self.w_uid].flush()\n self.w_uid = None\n self.hIdx = None\n for k in list(self.wFp.keys()):\n del self.wFp[k]\n\n for k in list(self.rFp.keys()):\n del self.rFp[k]", "def _closeMedianInput(self):\n\n # Close all singly drizzled images used to create median image.\n for img in self.single_handles:\n img.close()\n self.single_list = []\n\n # Close all singly drizzled weight images used to create median image.\n for img in self.weight_handles:\n img.close()\n self.weight_list = []\n\n # If new median masks was turned on, close those files\n if self.weight_mask_list:\n for arr in self.weight_mask_list:\n del arr\n self.weight_mask_list = None", "def _removeFX(self):\r\n\t\tnodesToClean = [CONST.FOAM_FLUID_SHAPENODE, CONST.WAKE_FLUID_SHAPENODE, 'fluids_hrc']\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\tfor eachCache in cmds.ls(type = 'cacheFile'):\r\n\t\t\tcmds.delete(eachCache)", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def 
close(self):", "def close(self):", "def close(self):", "def close(self):", "def _close(self):\n self.fh.close()", "def close(self):\n # This is a NOOP by default", "def close(self):\n # This is a NOOP by default", "def close(self):\n ...", "def close(self):\n ...", "def stop_aperture(self):\n self.aperture_id = None\n self.mode = \"\"", "def close(self):\n if not self._f:\n return\n\n logger.info(\"Closed {} ({})\".format(self.name, self.num))\n\n self._f.close()\n self._f = None", "def kernelStopping(self):\n # Always call parent method to be safe.\n super().kernelStopping()\n self.writeFundamental()", "def close(self):\n # By default, this is a NOOP", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def _close_figure(self):\n if self.disp_images:\n plt.show()\n else:\n plt.close()", "def close(self):\n if self.SE == 6:\n self.evr.polarity.put('VAL', 0)\n else:\n self.S_CLOSE = 1", "def friewallOn():\n pass", "def __del__(self):\n if (\n self._fpointer is not None and not self._fpointer.closed\n ): # pragma: no mutate\n self._fpointer.close()", "def _CloseOutputFiles(self):\n self.gfile.close()\n self.efile.close()", "def _isclose(self):\n return self.dp.state()==PyTango.DevState.CLOSE", "def close(self):\n self.fout.close()", "def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None", "def cleanup(self):\n if self.log_fo:\n self.log_fo.close()", "def close(self):\n self.closed = True", "def close(self) -> None:\n ...", "def close(self) -> None:\n ...", "def _close( self ):\n for raster in self._raster_data:\n if raster != []:\n raster.close()", "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def close(self):\n if dev[self.id] == FLI_INVALID_DEVICE:\n raise FliError(\"Device already closed or not initialized\")\n dev[self.id] = FLI_INVALID_DEVICE\n with self.lock:\n self.status = CLOSED", "def modes_off(self):\n bm = self.fitsimage.get_bindmap()\n bm.reset_mode(self.fitsimage)", "def close(self):\n self.input_processing_running = False", "def Close(self):\n return _gmat_py.SolarFluxReader_Close(self)", "def close(self):\n self.f.close()\n if self.f_script is not None:\n self.f_script.close()", "def unload_fmu(self):\n del self.fmu", "def hard_reset(self):\n self.close()\n self.open()", "def terminate(self):\n plt.close('all')", "def kill(self):\r\n plt.close(self.fig)", "def close(self):\n self.ag.close()\n self.mag.close()", "def __window_close(self):\n pass", "def handle_close(self):\n self.active = False\n self.close()", "def close_hdf_file(self):\n\t\tself.h5f.close()", "def close_lite6_gripper(self):\r\n return self._arm.close_lite6_gripper()", "def close(self):\n self.is_open = False", "def closeth(f, b=None):\n\n if b is None: b = secross()\n return subm( close(f,b), f)", "def close(self):\n\n # Fit data to model\n self.logger.logMessageString(f\"Fit Scikit-learn model with {self.x.shape[0]} records\", fmeobjects.FME_INFORM)\n for c in self.numeric_features:\n self.x.loc[:,c] = pd.to_numeric(self.x.loc[:,c], errors='coerce')\n self.sk.pipe.fit(X=self.x, y=self.y)\n # Export model\n self.logger.logMessageString(\"Export Scikit-learn model\", fmeobjects.FME_INFORM)\n self.sk.export_model(self.output_path)\n\n self.logger.logMessageString(\"Transformer closed\", fmeobjects.FME_INFORM)\n del (self.logger) # Needed 
to avoid \"Not All FME sessions were destroyed\"", "def auto_close_all_figures(request):\n if \"matplotlib\" in request.keywords:\n plt.close(\"test\")\n plt.close(\"reference\")", "def close(self) -> None:\n self.f.close()", "def close(self):\r\n pass", "def cleanup_footprint(self, fpname):\n logging.debug(\"Environment: %s entered\" % __name__)\n fp = self.get_footprint(fpname, start=False)\n fp.cleanup_old_images()\n fp.save()", "def close(self):\r\n pass", "def close(self):\r\n pass", "def close(self):\r\n pass", "def close(self):\n self.closing = True", "def on_before_close(self):\n pass", "def _basicClose(self):\n raise NotImplementedError()", "def _basicClose(self):\n raise NotImplementedError()", "def _close_sd_ref():\n global SD_REF\n if SD_REF:\n SD_REF.close()\n SD_REF = None", "def soft_reset():", "def close(self):\n\t\tif self.is_open:\n\t\t\tself.hdf5file.close()\n\t\t\tself.is_open = False", "def close(self):\n pass", "def close(self):\n pass" ]
[ "0.63533455", "0.5993525", "0.59140307", "0.5904497", "0.5810159", "0.5796537", "0.5787865", "0.57800907", "0.5778978", "0.577066", "0.57619506", "0.57507795", "0.5708684", "0.5704931", "0.56797045", "0.5677076", "0.56575465", "0.56262064", "0.56262064", "0.5623001", "0.56024826", "0.55958974", "0.5586483", "0.55802757", "0.557398", "0.5560523", "0.55450076", "0.55427337", "0.55427337", "0.55427337", "0.55427337", "0.55427337", "0.55427337", "0.55427337", "0.55427337", "0.55427337", "0.55427337", "0.55413765", "0.5527032", "0.5527032", "0.5516734", "0.5516734", "0.55098176", "0.5506354", "0.545545", "0.5446782", "0.54390776", "0.54390776", "0.54390776", "0.54390776", "0.54390776", "0.54390776", "0.54390776", "0.54390776", "0.54244554", "0.5421094", "0.53968614", "0.5396196", "0.5371567", "0.5365659", "0.5361539", "0.53573966", "0.5351642", "0.5348948", "0.5348224", "0.5348224", "0.5346976", "0.5344544", "0.53406703", "0.53402776", "0.5326095", "0.53195244", "0.529132", "0.52911806", "0.5276939", "0.5273611", "0.5271409", "0.52588373", "0.5256427", "0.5252057", "0.52497786", "0.52482456", "0.5241502", "0.5236583", "0.5231451", "0.5229173", "0.5227441", "0.52227265", "0.52188444", "0.52163714", "0.52163714", "0.52163714", "0.5211802", "0.5204652", "0.51992685", "0.51992685", "0.5197952", "0.51953185", "0.5190099", "0.51870304", "0.51870304" ]
0.0
-1
For 371, not closing FFS for bias/dark.
def test_do_boss_calibs_one_dark_ffs_open(self):
    sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])
    cmdState = CmdState.DoBossCalibsCmd()
    cmdState.nDark = 1
    self._do_boss_calibs(4, 25, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close():\n\trfm.close()", "def close_trace(self):\n if self.State==1:\n self.tf.close()\n self.State = 0 \n else:\n print \"Tracefile not open...\"", "def safeClose():\n # outputToggle(ledPin, False)\n # outputToggle(auxlightPin, False)\n camera.stop_preview()\n camera.close()\n GPIO.cleanup()", "def close(self):\r\n if plt.fignum_exists(num=1):\r\n plt.close()", "def close(self):\n\n if self.fig:\n plt.close('all')\n plt.ioff()", "def close(self):\n\t\tself.applied = 0", "def cehs():\n\tcloseEHShutter()", "def close(f, b=None):\n\n if b is None: b = secross()\n y = erode(dilate(f,b),b)\n return y", "def close_datafile(fs):\r\n fs.close() # fs is the output from init_datafile\r", "def bfm_close ( cid=0\n , rigor=False\n , verbose=False ):\n global _cosim\n _bfm_close = WrapFunction( _cosim\n , 'bfm_close'\n , ctypes.c_int\n ,[ctypes.c_int])\n return _bfm_close( cid )", "def _close(self):\n # TODO\n self.holding = False", "def close():", "def _close(self):\n log.Debug('dpbx.close():')", "def stop(_):\n if \"fscad.fscad\" in sys.modules:\n del sys.modules[\"fscad.fscad\"]", "def shutdown_openafs(self):\n if get_var('AFS_DIST') == \"transarc\":\n uname = os.uname()[0]\n if uname == 'Linux':\n run_keyword(\"Stop the Cache Manager on Linux\")\n elif uname == 'SunOS':\n run_keyword(\"Stop the Cache Manager on Solaris\")\n else:\n raise AssertionError(\"Unsupported operating system: %s\" % (uname))\n run_keyword(\"Stop the bosserver\")\n else:\n run_keyword(\"Stop Service\", \"openafs-client\")\n run_keyword(\"Stop Service\", \"openafs-server\")", "def _barf_if_closed(self):\n if self.closed:\n raise ValueError(\"I/O operation on closed file\")", "def close():\n wfs.close()\n ax.close()", "def close(self):\n self.f.close()", "def close(self):\n self.f.close()", "def _Close(self):\n self._fsfat_volume = None\n self._file_object = None", "def _close_staf_handle(self):\n\n try:\n self._staf_handle.unregister()\n except STAFException, e:\n raise FatalError(\"Error unregistering with STAF, RC: {0}, \"\n \"Result: {1}\".format(e.rc, e.result))", "def close(self):\n self.pi.set_watchdog(self.gpio, 0)\n if self.either_edge_cb:\n self.either_edge_cb.cancel()\n self.either_edge_cb = None", "def Close(self):", "def go_infFD(self):\n\n response = self.send_lens_cmd(['05', '00', '00', '00'], fast_mode=True)\n self.wait_focus_move()", "def close(self, *args, **kwargs):\n if self.mode == 'a':\n if self.w_uid in self.wFp:\n self.wFp[self.w_uid].flush()\n self.w_uid = None\n self.hIdx = None\n for k in list(self.wFp.keys()):\n del self.wFp[k]\n\n for k in list(self.rFp.keys()):\n del self.rFp[k]", "def _closeMedianInput(self):\n\n # Close all singly drizzled images used to create median image.\n for img in self.single_handles:\n img.close()\n self.single_list = []\n\n # Close all singly drizzled weight images used to create median image.\n for img in self.weight_handles:\n img.close()\n self.weight_list = []\n\n # If new median masks was turned on, close those files\n if self.weight_mask_list:\n for arr in self.weight_mask_list:\n del arr\n self.weight_mask_list = None", "def _removeFX(self):\r\n\t\tnodesToClean = [CONST.FOAM_FLUID_SHAPENODE, CONST.WAKE_FLUID_SHAPENODE, 'fluids_hrc']\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\tfor eachCache in cmds.ls(type = 'cacheFile'):\r\n\t\t\tcmds.delete(eachCache)", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def 
close(self):", "def close(self):", "def close(self):", "def close(self):", "def _close(self):\n self.fh.close()", "def close(self):\n # This is a NOOP by default", "def close(self):\n # This is a NOOP by default", "def close(self):\n ...", "def close(self):\n ...", "def stop_aperture(self):\n self.aperture_id = None\n self.mode = \"\"", "def close(self):\n if not self._f:\n return\n\n logger.info(\"Closed {} ({})\".format(self.name, self.num))\n\n self._f.close()\n self._f = None", "def kernelStopping(self):\n # Always call parent method to be safe.\n super().kernelStopping()\n self.writeFundamental()", "def close(self):\n # By default, this is a NOOP", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def _close_figure(self):\n if self.disp_images:\n plt.show()\n else:\n plt.close()", "def close(self):\n if self.SE == 6:\n self.evr.polarity.put('VAL', 0)\n else:\n self.S_CLOSE = 1", "def __del__(self):\n if (\n self._fpointer is not None and not self._fpointer.closed\n ): # pragma: no mutate\n self._fpointer.close()", "def friewallOn():\n pass", "def _CloseOutputFiles(self):\n self.gfile.close()\n self.efile.close()", "def _isclose(self):\n return self.dp.state()==PyTango.DevState.CLOSE", "def close(self):\n self.fout.close()", "def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None", "def cleanup(self):\n if self.log_fo:\n self.log_fo.close()", "def close(self):\n self.closed = True", "def close(self) -> None:\n ...", "def close(self) -> None:\n ...", "def _close( self ):\n for raster in self._raster_data:\n if raster != []:\n raster.close()", "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def close(self):\n if dev[self.id] == FLI_INVALID_DEVICE:\n raise FliError(\"Device already closed or not initialized\")\n dev[self.id] = FLI_INVALID_DEVICE\n with self.lock:\n self.status = CLOSED", "def modes_off(self):\n bm = self.fitsimage.get_bindmap()\n bm.reset_mode(self.fitsimage)", "def close(self):\n self.input_processing_running = False", "def Close(self):\n return _gmat_py.SolarFluxReader_Close(self)", "def unload_fmu(self):\n del self.fmu", "def close(self):\n self.f.close()\n if self.f_script is not None:\n self.f_script.close()", "def hard_reset(self):\n self.close()\n self.open()", "def terminate(self):\n plt.close('all')", "def kill(self):\r\n plt.close(self.fig)", "def close(self):\n self.ag.close()\n self.mag.close()", "def __window_close(self):\n pass", "def handle_close(self):\n self.active = False\n self.close()", "def close_hdf_file(self):\n\t\tself.h5f.close()", "def close_lite6_gripper(self):\r\n return self._arm.close_lite6_gripper()", "def close(self):\n self.is_open = False", "def closeth(f, b=None):\n\n if b is None: b = secross()\n return subm( close(f,b), f)", "def close(self):\n\n # Fit data to model\n self.logger.logMessageString(f\"Fit Scikit-learn model with {self.x.shape[0]} records\", fmeobjects.FME_INFORM)\n for c in self.numeric_features:\n self.x.loc[:,c] = pd.to_numeric(self.x.loc[:,c], errors='coerce')\n self.sk.pipe.fit(X=self.x, y=self.y)\n # Export model\n self.logger.logMessageString(\"Export Scikit-learn model\", fmeobjects.FME_INFORM)\n self.sk.export_model(self.output_path)\n\n self.logger.logMessageString(\"Transformer closed\", fmeobjects.FME_INFORM)\n del (self.logger) # Needed 
to avoid \"Not All FME sessions were destroyed\"", "def auto_close_all_figures(request):\n if \"matplotlib\" in request.keywords:\n plt.close(\"test\")\n plt.close(\"reference\")", "def close(self) -> None:\n self.f.close()", "def close(self):\r\n pass", "def cleanup_footprint(self, fpname):\n logging.debug(\"Environment: %s entered\" % __name__)\n fp = self.get_footprint(fpname, start=False)\n fp.cleanup_old_images()\n fp.save()", "def close(self):\r\n pass", "def close(self):\r\n pass", "def close(self):\r\n pass", "def close(self):\n self.closing = True", "def on_before_close(self):\n pass", "def _basicClose(self):\n raise NotImplementedError()", "def _basicClose(self):\n raise NotImplementedError()", "def _close_sd_ref():\n global SD_REF\n if SD_REF:\n SD_REF.close()\n SD_REF = None", "def soft_reset():", "def close(self):\n\t\tif self.is_open:\n\t\t\tself.hdf5file.close()\n\t\t\tself.is_open = False", "def close(self):\n pass", "def close(self):\n pass" ]
[ "0.6353356", "0.5994088", "0.5915026", "0.59051275", "0.58110243", "0.5796057", "0.57873064", "0.57794714", "0.5778692", "0.57710266", "0.5761392", "0.57499737", "0.5708475", "0.57054716", "0.5680105", "0.56762487", "0.5658499", "0.5626224", "0.5626224", "0.5622663", "0.5603471", "0.55962205", "0.55862576", "0.55794513", "0.5574183", "0.556119", "0.5545499", "0.55422646", "0.55422646", "0.55422646", "0.55422646", "0.55422646", "0.55422646", "0.55422646", "0.55422646", "0.55422646", "0.55422646", "0.5541002", "0.55265564", "0.55265564", "0.5516461", "0.5516461", "0.55095875", "0.55063903", "0.5455144", "0.54462355", "0.54386103", "0.54386103", "0.54386103", "0.54386103", "0.54386103", "0.54386103", "0.54386103", "0.54386103", "0.5424388", "0.5420716", "0.5396106", "0.53956753", "0.53709424", "0.5365686", "0.5361003", "0.5357172", "0.5351918", "0.5348969", "0.534813", "0.534813", "0.5347794", "0.5344253", "0.5341756", "0.53410906", "0.53261435", "0.5319983", "0.529212", "0.529178", "0.52761763", "0.527425", "0.5271553", "0.5258979", "0.5256849", "0.52522296", "0.52503407", "0.52499926", "0.52417076", "0.5235864", "0.52324027", "0.52299607", "0.52274495", "0.52224743", "0.5218996", "0.5215974", "0.5215974", "0.5215974", "0.521195", "0.52043486", "0.51984304", "0.51984304", "0.51980805", "0.51942396", "0.5190559", "0.51866394", "0.51866394" ]
0.0
-1
coobserving carts should close the apogee shutter first.
def test_do_boss_calibs_one_flat_coobserve(self):
    cmdState = CmdState.DoBossCalibsCmd()
    cmdState.nFlat = 1
    sopTester.updateModel('guider', TestHelper.guiderState['apogeemangaDitherLoaded'])
    sopTester.updateModel('mcp', TestHelper.mcpState['apogee_parked'])
    sopTester.updateModel('apogee', TestHelper.apogeeState['B_open'])
    self._do_boss_calibs(8, 38, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cart_flushed(self):\n self.fill_session_cart()\n\n session = self.client.session\n self.assertNotEqual(session['cart'], {})\n self.assertNotEqual(session['cart_cost'], 0)\n\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n\n session = self.client.session\n self.assertEqual(session['cart'], {})\n self.assertEqual(session['cart_cost'], 0)", "def test_shoppingcart_must_not_update_if_closed(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the closed shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n data[\"is_closed\"] = True\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then check for fail in update shoppingcart\n self.url = reverse(\"update-shoppingcart\")\n data[\"id\"] = id_cart\n response = self.client.post(self.url, data, **self.auth_headers)\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def discount(self, cart):", "def test_add_to_cart_item(self, app, pm):\n logging.basicConfig(filename='/home/osboxes/pytest_mobile/logs/test.log', level=logging.DEBUG, filemode=\"w\")\n app.browser.tap_button(pm.start_page.get_skip_button())\n app.browser.tap_button(pm.main_page.get_cart_button())\n result = app.browser.get_text(pm.my_cart_page.get_cart_empty_cart())\n assert result == \"Your Cart is Empty\"\n app.browser.tap_button(pm.menu_items.get_back_button())", "def checkout_cart(self, cart):\n pass", "def test_show_cart_empty(client):\n raise NotImplemented('Acceptance test failed')", "def test_do_apogee_science_500s_after_1000s_cart7(self):\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLead1000sCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 1000)\n self.assertEqual(cmdState.keywords['expTime'], 1000)\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLeadCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 500)\n self.assertEqual(cmdState.keywords['expTime'], 500)", "def test_do_apogee_science_1000s_after_500s_cart7(self):\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLeadCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 500)\n self.assertEqual(cmdState.keywords['expTime'], 500)\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLead1000sCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 1000)\n self.assertEqual(cmdState.keywords['expTime'], 1000)", "def test_initial_risk_position_sizer_with_cap(self):\n fraction_at_risk = 0.01 # will give leverage of 2, that will be capped to 1.5\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n 
self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n max_leverage = self.initial_risk_position_sizer.max_target_percentage\n target_quantity = float(np.floor(portfolio_value * max_leverage))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.ticker, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.ticker, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def close_orders(self):", "async def stocks(self, ctx):\n\t\tpass", "def test_initial_risk_position_sizer_with_cap(self):\n fraction_at_risk = 0.01 # will give leverage of 2, that will be capped to 1.5\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk)\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n max_leverage = self.initial_risk_position_sizer.max_target_percentage\n target_quantity = int(np.floor(portfolio_value * max_leverage))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.contract, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.contract, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_show_cart_with_items(client):\n raise NotImplemented('Acceptance test failed')", "def api_confirm_cart():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tfor row in cur.execute('SELECT * FROM cart').fetchall():\r\n\t\t(id1,title,count,price) = row['id'], row['title'], row['inventory_count'],row['price']\r\n\t\tprint(row)\r\n\t\tcur.execute('UPDATE products SET inventory_count = inventory_count-1 WHERE id=? 
AND inventory_count>0',(id1,))\r\n\t\tprint(\"Reduced from \"+ str(count)+\"/n\")\r\n\tp= cur.execute('SELECT * FROM products').fetchall()\r\n\tcur.execute('DELETE FROM cart;')\r\n\tconn.commit()\r\n\treturn jsonify(p)", "def test_keep_cart_on_login(self):\n request = self.factory.post('/shop/auth/login', follow=True)\n request.customer = Customer()\n request.customer.save()\n request.session = {'session_key': 'keep_cart'}\n request.user = self.bart\n old_cart = Cart.objects.get_from_request(request)\n user_logged_in.send(sender=self.bart.__class__, request=request, user=self.bart)\n new_cart = Cart.objects.get_from_request(request)\n self.assertEqual(new_cart.customer, request.customer)\n self.assertEqual(new_cart, old_cart)", "def test_flipkart_flow(self):\n try:\n self.google_page.enter_google_search_text(message.FLIPKART_TEXT)\n self.google_page.display_google_search_suggestions()\n self.google_page.press_enter_from_google_search_textbox()\n self.google_page.click_on_flipkart_link()\n \n self.flipkart_page.close_login_popup()\n self.flipkart_page.navigate_window_air_conditioners_page()\n self.flipkart_page.select_add_compare_checkbox(2)\n self.flipkart_page.select_add_compare_checkbox(3)\n self.flipkart_page.select_add_compare_checkbox(6)\n self.flipkart_page.click_on_add_compare_button()\n \n # print item details\n self.flipkart_page.print_item_details(1)\n self.flipkart_page.print_item_details(2)\n self.flipkart_page.print_item_details(3)\n\n # get compare item page url and display avalibility\n self.compare_page_url = self.driver.current_url\n self.flipkart_page.add_to_cart(1)\n self.driver.get(self.compare_page_url) \n self.flipkart_page.add_to_cart(2)\n self.driver.get(self.compare_page_url)\n self.flipkart_page.add_to_cart(3)\n self.flipkart_page.verify_avalibility_of_items_by_pincode(conf.PINCODE)\n\n print('------ Delivery details for {} pincode --------'.format(conf.PINCODE))\n self.flipkart_page.print_item_delivery_msg(1)\n self.flipkart_page.print_item_delivery_msg(2)\n self.flipkart_page.print_item_delivery_msg(3)\n\n self.flipkart_page.check_again_avalibility_of_items_by_pincode(conf.PINCODE2)\n\n print('------ Delivery details for {} pincode --------'.format(conf.PINCODE2))\n self.flipkart_page.print_item_delivery_msg(1)\n self.flipkart_page.print_item_delivery_msg(2)\n self.flipkart_page.print_item_delivery_msg(3) \n except Exception as msg:\n print(str(msg))", "def test_update_shopping_cart(self):\n food_cost = self.browser.find_element_by_id('food-cost')\n old_food_cost = int(food_cost.text)\n\n items = self.get_list_of_items()\n index = randint(1, len(items) - 1)\n list_item = self.get_item_dict(items[index])\n item_price = self.expected_contents[index]['price']\n old_cost = self.expected_contents[index]['cost']\n\n increase_by = randint(5, 10)\n directions = [\n {\n 'action': 'increase',\n 'range': range(1, increase_by + 1)\n },\n {\n 'action': 'decrease',\n 'range': range(increase_by - 1, - 1, -1)\n }\n ]\n for direction in directions:\n for i in direction['range']:\n list_item[direction['action']].click()\n sleep(0.1)\n new_cost = int(list_item['cost'].text)\n new_food_cost = int(food_cost.text)\n self.assertTrue(new_food_cost - old_food_cost ==\n new_cost - old_cost == item_price * i)", "def test_add_and_remove_two_items(self):\n login = LoginPage(self.driver) #SAUCE-LAB-5\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products[0]\n first_item: InventoryItem\n first_item.add_to_cart()\n print('\\n')\n 
print(first_item.get_title())\n print(first_item.get_description())\n print(first_item.get_price())\n print('*' * 80)\n second_item = inventory_page.products[4]\n second_item: InventoryItem\n second_item.add_to_cart()\n print('\\n')\n print(second_item.get_title())\n print(second_item.get_description())\n print(second_item.get_price())\n print('*' * 80)\n first_item.remove_from_cart()\n second_item.remove_from_cart()\n print(f'Products {first_item.get_title()} and {second_item.get_title()} were successfully removed')", "def test_initial_risk_position_sizer_without_cap(self):\n fraction_at_risk = 0.23\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n target_quantity = float(np.floor(portfolio_value * self.initial_risk / fraction_at_risk))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.ticker, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.ticker, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def outofstock_pop(self):\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'button#close-button')))\n popup=self.driver.find_element_by_css_selector('button#close-button')\n action = TouchActions(self.driver)\n action.tap(popup).perform()", "def test_basicNoSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Don't add the item\n pos.click(\"Ok\")\n \n # Confirm we aren't in a transaction\n if self.in_transaction():\n self.tc_fail(\"Unintentionally In Transaction\")\n else:\n self.log.info(\"Confirmed we are not in a transaction\")\n \n # Setup for next test\n self.recover()", "def test_shopping_cart_not_empty(self):\n expected_contents = self.fill_session_cart()\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.context['contents'], expected_contents)", "def cart_contents(request):\n cart_items = []\n total = 0\n savings = 0\n product_count = 0\n points_available = 0\n points_earned = 0\n discount_applied = request.session.get('discount_applied')\n cart = request.session.get('cart', {})\n\n # Create a new dict so that items can be removed if needed\n new_dict = {k: v for k, v in cart.items()}\n\n for item, quantity in new_dict.items():\n # Use string created in cart view to isolate model ids\n product_id = item.split(\"_\")[0]\n size_id = item.split(\"_\")[1]\n nic_id = item.split(\"_\")[2]\n\n # Retrieve relevant objects for templating and remove if\n # no longer in database\n try:\n product = Product.objects.get(pk=product_id)\n except Product.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item was removed from your cart as it is \\\n no longer available. Try to find a worthy replacement!')\n continue\n # Repeat for Size\n try:\n size = Size.objects.get(pk=size_id)\n except Size.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item could not be added as its \\\n size is no longer available. 
\\\n Try to find a worthy replacement!')\n continue\n # Repeat for Nicotine\n try:\n nic = Nicotine.objects.get(pk=nic_id)\n except Nicotine.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item could not be added as its \\\n nicotine options have changed. \\\n Try to find a worthy replacement!')\n continue\n\n # Check sale status and retrieve relevant price from Size model\n if product.on_sale:\n price = size.sale_price\n savings += (size.price - size.sale_price) * quantity\n else:\n price = size.price\n total += quantity * price\n product_count += quantity\n cart_items.append({\n 'item_id': item,\n 'product': product,\n 'size': size,\n 'nic': nic,\n 'price': price,\n 'quantity': quantity,\n })\n\n original_total = total\n request.session['cart'] = cart\n\n # Get user profile\n if request.user.is_authenticated:\n profile = get_object_or_404(UserProfile, user_id=request.user)\n\n else:\n profile = None\n\n # Check for available points\n if profile:\n points_available = profile.points\n\n # Check if user has chosen to redeem points and that the discount\n # will never take the total below zero\n if discount_applied:\n if total - Decimal(points_available / 100) <= 0:\n total = 0\n\n else:\n total -= Decimal(points_available / 100)\n\n if total < settings.FREE_DELIVERY_THRESHOLD:\n delivery = Decimal(settings.STANDARD_DELIVERY)\n free_delivery_delta = settings.FREE_DELIVERY_THRESHOLD - total\n\n else:\n delivery = 0\n free_delivery_delta = 0\n\n grand_total = delivery + total\n points_earned = int(math.floor(total))\n\n context = {\n 'cart_items': cart_items,\n 'total': total,\n 'original_total': original_total,\n 'savings': savings,\n 'product_count': product_count,\n 'delivery': delivery,\n 'free_delivery_delta': free_delivery_delta,\n 'free_delivery_threshold': settings.FREE_DELIVERY_THRESHOLD,\n 'grand_total': grand_total,\n 'points_available': points_available,\n 'discount_applied': discount_applied,\n 'points_earned': points_earned,\n }\n\n return context", "def test_initial_risk_position_sizer_without_cap(self):\n fraction_at_risk = 0.23\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk)\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n target_quantity = int(np.floor(portfolio_value * self.initial_risk / fraction_at_risk))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.contract, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.contract, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_update_cart(self, driver):\n\n logging.info(\"Start test case: Edit product in orderSummary\")\n data = self.test_data[\"Edit product in orderSummary\"]\n products = data[\"Products\"]\n logging.info(\"Test data: {}\".format(products))\n\n for i in range(len(products)):\n select_product(driver, products[i][\"Page\"], products[i][\"Product Name\"])\n add_product_to_cart(driver, products[i][\"Size\"], products[i][\"Color\"], products[i][\"Quantity\"])\n\n added_name = get_product_name(driver, index=data[\"Added Index\"] - 1)\n update_quantity_in_cart(driver, name=added_name, added_amount=data[\"Added Amount\"])\n expected_qty = get_product_detail_in_cart(driver, added_name)[\"Qty\"]\n\n 
removed_name = get_product_name(driver, index=data[\"Removed Index\"] - 1)\n remove_product_from_cart(driver, name=removed_name)\n expected_amt = get_product_amount_in_cart(driver)\n\n checkout_from_order_summary(driver)\n actual_amt = get_product_amount_in_order(driver)\n actual_qty = get_product_detail_in_order(driver, added_name)[\"Qty\"]\n logging.info(\"Verify product amount and product quantity on checkout page\")\n assert actual_amt == expected_amt, f\"your cart product amount is {actual_amt}, it should be {expected_amt}\"\n assert actual_qty == expected_qty, f\"The quantity of added product {added_name} is {actual_qty}, it should be {expected_qty}\"\n assert not verify_product_in_order(driver, removed_name)", "def test_remove_item_from_cart(self):\n items = self.get_list_of_items()\n index = randint(1, len(items) - 1)\n list_item = self.get_item_dict(items[index])\n\n food_cost = self.browser.find_element_by_id('food-cost')\n old_food_cost = int(food_cost.text)\n cost_loss = self.expected_contents[index]['cost']\n list_item['remove'].click()\n\n new_food_cost = int(food_cost.text)\n self.assertEqual(new_food_cost, old_food_cost - cost_loss)\n self.assert_element_stale(items[index])", "def climb(self):\n print(\"Inside WoodElf.climb\")", "def set_equivalent_cart(self, base_cart):\n\t\t# Emptying cart\n\t\tself.empty()\n\n\t\t# Getting base cart content\n\t\tcontents = base_cart.cart_content_set.all()\n\t\tbase_osm = base_cart.osm\n\t\tequivalence_store = {}\n\n\t\tfor content in contents:\n\t\t\tbase_product = content.product\n\t\t\tquantity = content.quantity\n\n\t\t\t# First, looking for a match\n\t\t\tmatch = base_product.productmatch_set.all()\n\t\t\tif len(match)>0:\n\t\t\t\tmatch = match[0]\n\t\t\t\tmathed_product = getattr(match, self.cart.osm+'_product') # Evil hack!! Or is it? 
I love Python :D\n\t\t\t\tif mathed_product is not None:\n\t\t\t\t\tmatch_content = self.add_product(mathed_product, quantity, is_user_added = False, is_match = True, is_suggested = False)\n\t\t\t\t\tsetattr(match_content, content.cart.osm+'_content', content)\n\t\t\t\t\tsetattr(content, match_content.cart.osm+'_content', match_content)\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': match_content,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': True,\n\t\t\t\t\t\t'is_suggested': False\n\t\t\t\t\t}\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontent.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmatch_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\n\t\t\t\t\t# print '\\tMatch : '+mathed_product.url\n\t\t\t\telse:\n\t\t\t\t\t# Look for similarities\n\t\t\t\t\tsimilarities = self.get_similarites(base_product, base_osm)\n\t\t\t\t\tif(len(similarities)>0):\n\t\t\t\t\t\tsim_content = self.add_product(similarities[0][0], quantity, is_user_added = False, is_match = False, is_suggested = True)\n\t\t\t\t\t\tsetattr(sim_content, content.cart.osm+'_content', content)\n\t\t\t\t\t\tsetattr(content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tcontent.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\t\t\t\t\telse:\n\t\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\n\n\t\t\telse:\n\t\t\t\t# Look for similarities\n\t\t\t\tsimilarities = self.get_similarites(base_product, base_osm)\n\t\t\t\tif(len(similarities)>0):\n\t\t\t\t\tsim_content = self.add_product(similarities[0][0], quantity, is_user_added = False, is_match = False, is_suggested = True)\n\t\t\t\t\tsetattr(sim_content, content.cart.osm+'_content', content)\n\t\t\t\t\tsetattr(content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontent.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\t\t\t\telse:\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\n\n\t\treturn equivalence_store", "def pol_to_cart():\n pass", "def bogof_discount(self):\n bogof_discount = 0\n for item in self.cart.items:\n if item.quantity > 1:\n bogof_discount += (math.floor(item.quantity / 2) * item.product.price)\n\n self.cart._total -= bogof_discount", "def test_remove_all(self): #SAUCE-LAB-8\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if 
inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were added')\n for item in first_item:\n item.remove_from_cart()\n if inventory_page.header.get_total_cart_items() == 0:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were removed')", "def test_basicSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Add the item\n pos.click(\"Sell Item\")\n \n # Confirm we added the item\n ret = self.confirm_line(-1, \"Generic Item\", \"$0.01\")\n if ret == True:\n self.log.info(\"Confirmed item added\")\n else:\n self.tc_fail(ret)\n \n # Setup for next test\n self.recover()", "def test_inTransSalePC(self):\n # Start a transasction\n pos.click_speed_key(\"Generic Item\")\n \n # Void the item to an empty transaction\n # NOTE: Should uncomment this when related defect is fixed (likely in MERLIN-1335)\n #pos.click(\"Void item\")\n \n # Repeat earlier test\n self.test_basicSalePC()", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def hay_stock(self):\n return self.producto.stock_disponible >= 0", "def ingredient_used_canceled(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used canceled initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.used.id)]\n , order=[('batch_number', 'DESC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n # pdb.set_trace()\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_update_shoppingcart_view(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # 
then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe the update\n self.url = reverse(\"update-shoppingcart\")\n data = { **self.shoppingcart_data }\n data[\"is_closed\"] = True\n data[\"id\"] = id_cart\n response = self.client.post(self.url, data, **self.auth_headers)\n if response.status_code == status.HTTP_200_OK:\n r_json = response.json()\n self.assertTrue(r_json[\"cart\"][\"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_items_in_cart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertTrue(context['display_shopping_cart'])", "def test_shopping_cart_displays_total_cost(self):\n expected_cart_cost = 0\n for item in self.fill_session_cart():\n expected_cart_cost += item['price'] * item['amount']\n\n self.client.get(self.SHOP_CART_URL)\n self.assertEqual(self.client.session['cart_cost'], expected_cart_cost)", "def cehs():\n\tcloseEHShutter()", "def test_can_check_if_cart_has_item_needing_delivery(self):\n request = HttpRequest()\n engine = import_module(settings.SESSION_ENGINE)\n session_key = None\n request.session = engine.SessionStore(session_key)\n\n # below two lines of code from https://stackoverflow.com/questions/11938164/why-dont-my-django-unittests-know-that-messagemiddleware-is-installed\n # fixes bug where test fails because unittest\n messages = FallbackStorage(request)\n setattr(request, '_messages', messages)\n\n random_non_delivery_item_1 = choice(StoreItem.objects.filter(delivery_required=False))\n random_non_delivery_item_2 = choice(\n StoreItem.objects.filter(delivery_required=False).exclude(id=random_non_delivery_item_1.id))\n random_delivery_item = choice(StoreItem.objects.filter(delivery_required=True))\n\n cart_add(request, random_non_delivery_item_1.id)\n self.assertFalse(cart_contains_item_needing_delivery(request))\n cart_add(request, random_non_delivery_item_2.id)\n self.assertFalse(cart_contains_item_needing_delivery(request))\n cart_add(request, random_delivery_item.id)\n self.assertTrue(cart_contains_item_needing_delivery(request))", "def test_find_stock_items(self):\n pass", "def test_delete_from_cart(open_browser, mysql_executor):\n HelperUrl.user_base_url(open_browser)\n MainPage(open_browser).open_product_page()\n ProductPage(open_browser).add_to_cart()\n CheckExistenceDB(mysql_executor).check_exist()\n Header(open_browser).open_cart_block() \\\n .delete_from_cart_block()\n CheckExistenceDB(mysql_executor).check_is_not_exist()", "def test_checkout_page_ie(self):\n # login as our customer\n logged_in = self.client.login(username=self.username, password=self.password)\n self.assertEqual(logged_in, True)\n\n cart_response = self.client.get(reverse('lfs_cart'))\n self.assertContains(cart_response, self.PRODUCT1_NAME, status_code=200)\n\n checkout_response = self.client.get(reverse('lfs_checkout'))\n\n # we expect a list of irish counties in the response as we have an Irish shipping address\n self.assertContains(checkout_response, 'Offaly', status_code=200)\n\n # we expect a list of american states in the response as we have an Irish shipping 
address\n self.assertContains(checkout_response, 'Washington', status_code=200)", "def test_checkout_page_ie(self):\n # login as our customer\n logged_in = self.client.login(username=self.username, password=self.password)\n self.assertEqual(logged_in, True)\n\n cart_response = self.client.get(reverse('lfs_cart'))\n self.assertContains(cart_response, self.PRODUCT1_NAME, status_code=200)\n\n checkout_response = self.client.get(reverse('lfs_checkout'))\n\n # we expect a list of irish counties in the response as we have an Irish shipping address\n self.assertContains(checkout_response, 'Offaly', status_code=200)\n\n # we expect a list of american states in the response as we have an Irish shipping address\n self.assertContains(checkout_response, 'Washington', status_code=200)", "def compare_price(self):\n if self.__product_price < self.__alert_price:\n #print(\"price drop...\")\n self.__alert_client = True\n self.__price_difference = self.__product_price - self.__alert_price\n else:\n #print(\"Price not reduced...\")\n self.__alert_client = False\n self.__price_difference = self.__product_price - self.__alert_price", "def test_add_to_cart(self):\n\n # Log the user in that is not the seller\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Confirm that product title appears in cart\n response = self.client.get(reverse('website:cart'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Ensure that the cart displays product title, but not the title for product2\n self.assertIn('<h6 class=\"mr-auto p-2\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mr-auto p-2\">Test Product2</h6>'.encode(), response.content)\n\n # Confirm that the post returns a response of 302\n response = self.client.get(reverse(\"website:add_to_cart\", args=(1,)))\n self.assertEqual(response.status_code, 302)", "def test_view_cart_contents(self):\n User.objects.create_user(\n username=\"testuser\", password=\"thisisasecret101\")\n item = Product(name=\"Product\",\n product_image=\"testing_img.jpg\",\n description=\"Product description.\",\n price=\"20.00\",\n stock_available=\"5\",\n showcase_product=\"True\")\n item.save()\n self.client.login(username=\"testuser\", password=\"thisisasecret101\")\n session = self.client.session\n session[\"cart\"] = {1: 1}\n session.save()\n response = self.client.get(\"/cart/\")\n self.assertEqual(response.status_code, 200)", "def checkout():\n global total_price\n total_price = 0\n update_total_price()\n tk_items.delete(0, tk.END)\n subprocess.run(['aplay', 'img/regi2.wav'])", "def main():\n\n dict_logs = {\n 'logs/e_log.log': logging.ERROR,\n 'logs/c_log.log': logging.INFO\n }\n\n logger = create_logger(**dict_logs)\n\n # For using Chrome\n browser = webdriver.Chrome('chromedriver.exe')\n\n url = 'https://www.bestbuy.com/site/nvidia-geforce-rtx-3090-24gb-gddr6x-pci-express-4-0-' \\\n 'graphics-card-titanium-and-black/6429434.p?skuId=6429434'\n\n # BestBuy RTX 3090 page\n browser.get(url)\n\n purchased = False\n increment = 0\n\n logger.info(f'Starting Web Scraping')\n # Try to add to cart\n while not purchased:\n\n try:\n # If success, product is out of stock, don't need the return\n browser.find_element_by_class_name(\"btn-disabled\")\n increment += 1\n if increment % (60*12) == 0: # Update STDOUT every 12 hours\n logger.info(f'Product not available')\n time.sleep(60)\n browser.refresh()\n\n except NoSuchElementException as e:\n logger.error(e)\n # Product in stock\n add_to_cart_button = 
browser.find_element_by_class_name('btn-primary')\n # Click the button and end the script\n add_to_cart_button.click()\n send_notification(config.get('TWILIO', 'twilio_to_number'))\n purchased = True\n\n logger.info('Product is in the shopping cart')\n\n # Hold the window open until manual purchase can be made\n while True:\n pass", "def test_shoppingcart_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe update\n data = self.shoppingcart_data\n data[\"quantity\"] = 20\n data[\"discount_value\"] = 9.99\n data[\"is_closed\"] = True\n self._update_model(\"shoppingcart\", id, data, [\"quantity\", \"discount_value\", \"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def test_add_product_to_cart(self, driver):\n logging.info(\"Start test case: Continue Shop\")\n data = self.test_data[\"Continue Shop\"][\"Products\"][0]\n logging.info(f\"Test data: [{data}]\")\n product_name = data[\"Product Name\"]\n\n select_product(driver, data[\"Page\"], product_name)\n add_product_to_cart(driver, data[\"Size\"], data[\"Color\"], data[\"Quantity\"])\n assert is_product_in_cart(driver, product_name)\n continue_shopping_from_order_summary(driver)\n assert verify_current_page_is_home(driver)", "def test_search_shoes_item_to_buy(self):\n self.driver.find_element_by_id(\"search_query_top\").send_keys(\"shoes\")\n self.driver.find_element_by_name(\"submit_search\").click()\n self.driver.find_element_by_xpath(\n \"/html/body/div[1]/div[2]/div/div[3]/div[2]/ul/li[2]/div/div[1]/div/a[1]/img\").click()\n self.driver.find_element_by_name(\"Submit\").click()\n time.sleep(5)", "def test_CalculateStockItemOrders(self):\n symbol = \"XXXX\"\n\n # Create ActiveStockItem\n activeStockItem = ActiveStockItem(symbol=symbol)\n quantity = 2\n buyStepSize = 1\n activeStockItem.SellStepSize = 2\n activeStockItem.SellStepType = SellDeltaType.FIXED\n activeStockItem.StartPrice = 20.55\n activeStockItem.QuantityMultiplier = 1\n activeStockItem.MaxActiveBuy = 2\n priceCoordinates:List[PriceCoordinate] = []\n priceCoordinates.append(PriceCoordinate(startPrice=0,quantity=quantity, \n buyDeltaType=BuyDeltaType.FIXED, fixedBuyDelta=buyStepSize))\n activeStockItem.PriceCoordinates = priceCoordinates\n\n # Create PortfolioPosition\n portfolioPosition = PortfolioPosition(symbol=symbol)\n portfolioPosition.Quantity = 9\n \n expectedLimitOrders:List[OrderInfo] = [\n OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),\n 
OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),\n OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),\n OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)\n ]\n\n possibleLimitOrders:List[OrderInfo] = self.manageOrdersHelpers.GeneratePossibleLimitOrders(activeStockItem, portfolioPosition.Quantity)\n\n self.assertSequenceEqual(expectedLimitOrders, possibleLimitOrders)\n\n placeOrders, cancelOrders = self.moneyMaker.CalculateStockItemOrders(activeStockItem, [], portfolioPosition)\n\n print(placeOrders)\n\n print(cancelOrders)\n\n for activeStockItem in ActiveStockItems:\n print(activeStockItem.Symbol)", "def test_add_to_cart_item_at_minimum_stock(self):\n response = self.client.get(\n '/self.base_url/sales/2/1',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"The following product has reached the mimimum stock, please contact the admin for sales below minimum stock\")\n self.assertEqual(response.status_code,200)", "def test_putaway_after_manufacturing_3(self):\n self.laptop.tracking = 'serial'\n mo_laptop = self.new_mo_laptop()\n serial = self.env['stock.production.lot'].create({'product_id': self.laptop.id, 'company_id': self.env.company.id})\n\n mo_form = Form(mo_laptop)\n mo_form.qty_producing = 1\n mo_form.lot_producing_id = serial\n mo_laptop = mo_form.save()\n mo_laptop.button_mark_done()\n\n # We check if the laptop go in the depot and not in the stock\n move = mo_laptop.move_finished_ids\n location_dest = move.move_line_ids.location_dest_id\n self.assertEqual(location_dest.id, self.depot_location.id)\n self.assertNotEqual(location_dest.id, self.stock_location.id)", "def test_manufacturing_scrap(self):\n\n # Update demo products\n (self.product_4 | self.product_2).write({\n 'tracking': 'lot',\n })\n\n # Update Bill Of Material to remove product with phantom bom.\n self.bom_3.bom_line_ids.filtered(lambda x: x.product_id == self.product_5).unlink()\n\n # Create Inventory Adjustment For Stick and Stone Tools with lot.\n lot_product_4 = self.env['stock.production.lot'].create({\n 'name': '0000000000001',\n 'product_id': self.product_4.id,\n 'company_id': self.env.company.id,\n })\n lot_product_2 = self.env['stock.production.lot'].create({\n 'name': '0000000000002',\n 'product_id': self.product_2.id,\n 'company_id': self.env.company.id,\n })\n\n stock_inv_product_4 = self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stick',\n 'product_ids': [(4, self.product_4.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_4.id, 'product_uom_id': self.product_4.uom_id.id, 'product_qty': 8, 'prod_lot_id': lot_product_4.id, 'location_id': self.stock_location_14.id}),\n ]})\n\n stock_inv_product_2 = self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stone Tools',\n 'product_ids': [(4, self.product_2.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_2.id, 'product_uom_id': self.product_2.uom_id.id, 'product_qty': 12, 'prod_lot_id': lot_product_2.id, 'location_id': self.stock_location_14.id})\n ]})\n (stock_inv_product_4 | stock_inv_product_2)._action_start()\n stock_inv_product_2.action_validate()\n stock_inv_product_4.action_validate()\n\n #Create Manufacturing order.\n production_form = Form(self.env['mrp.production'])\n production_form.product_id = self.product_6\n production_form.bom_id = self.bom_3\n production_form.product_qty = 12\n production_form.product_uom_id = 
self.product_6.uom_id\n production_3 = production_form.save()\n production_3.action_confirm()\n production_3.action_assign()\n\n # Check Manufacturing order's availability.\n self.assertEqual(production_3.reservation_state, 'assigned', \"Production order's availability should be Available.\")\n\n location_id = production_3.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) and production_3.location_src_id.id or production_3.location_dest_id.id,\n\n # Scrap Product Wood without lot to check assert raise ?.\n scrap_id = self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'production_id': production_3.id})\n with self.assertRaises(UserError):\n scrap_id.do_scrap()\n\n # Scrap Product Wood with lot.\n self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'lot_id': lot_product_2.id, 'production_id': production_3.id})\n\n #Check scrap move is created for this production order.\n #TODO: should check with scrap objects link in between", "def mock_candle_producers(state: SharedState):", "def test_cancel_shipment_old(self):\n pass", "def test_add_item_to_cart(client):\n raise NotImplemented('Acceptance test failed')", "def simulate(tracks, carts):\n collisions = set()\n spots = set((c[1], c[0]) for c in carts)\n new_carts = []\n while carts:\n cart = heappop(carts)\n y, x, = cart[0:2]\n if (x, y) in collisions:\n continue\n spots.remove((x, y))\n\n new_cart = move(cart, tracks)\n y, x = new_cart[0:2]\n if (x, y) in spots:\n collisions.add((x, y))\n spots.add((x, y))\n continue\n spots.add((x, y))\n heappush(new_carts, new_cart)\n\n carts = new_carts\n return carts, collisions", "def on_market_info(self):\n pass", "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def test_get_cart(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', True)\n self.assertEqual(self.cart_item_manager.get_cart(user_id, cart_id),\n self.dynamo_accessor.get_item(config.dynamo_cart_table_name,\n keys={'UserId': user_id, 'CartId': cart_id}))", "def show_cart(update, context):\n bot = context.bot\n query = update.callback_query\n\n chat_id = update.effective_chat.id\n user = update.effective_user\n\n # all items ordered by user in message and his price to pay for them\n message_and_price = str_user_cart(chat_id, user.id)\n # InlineKeyboard back to start menu\n keyboard = [[InlineKeyboardButton(\"back to menu\", callback_data=str(ONE))]]\n # change last message send by bot\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message_and_price['message'],\n reply_markup=InlineKeyboardMarkup(keyboard))\n # notify ConversationHandler of SEVENTH stage\n return SEVENTH", "def sell():\n return apology(\"TODO\")", "def test_add_to_cart(self):\n\n # test sale item that can be sold\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + 
self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"These are the items on your Cart\")\n self.assertEqual(response.status_code,200)", "def test_update_cart_name_same_name(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.update_cart(user_id, cart_id, {'CartName': 'Cart1'})", "def EmptyCart(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n trade_data = {\n \"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")", "def test_shoppingcart_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performing detail\n self._detail_model(\"shoppingcart\", self.shoppingcart_data, id, [\"quantity\", \"discount_value\", \"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def main():\n root = Tk()\n login_window(root, \"Alcohol Store\", \"250x150\")\n root.iconbitmap('picture/Alcohol for all-logos_transparent.ico')\n root.mainloop()\n # try:\n #\n # logger.info(\"StoreApp Ready\")\n #\n # store = Store()\n # #store.display_drinks_in_store()\n #\n #\n #\n #\n # #Under age example\n # #--------------------------------------------------------------------\n # drink = Drink(\"Wiski\", \"RedLabel\", 1055, 500, 0)\n # buyer = Buyer(\"Wihbe\", 2055, 5022, 30)\n # sale = Sale(558956)\n #\n # wihbe_product_sale = ProductSale(5, sale, drink, buyer)\n # store.sell_product(wihbe_product_sale)\n #\n # #Not exist in the stock example.\n # #--------------------------------------------------------------------\n #\n # buyer_amit = Buyer(\"Amit\", 20365899, 504808196, 80)\n # amit_product_sale = ProductSale(5, sale, drink, buyer_amit)\n # store.sell_product(amit_product_sale)\n #\n # #Purchase example.\n # #--------------------------------------------------------------------\n # supplier = Supplier(\"Chen\",\"Haifa\", 11, 559842658)\n # product_purchase = ProductPurchase(drink, 
supplier, \"Buying vodka\", 3)\n # store.product_purchase_from_supplier(product_purchase)\n #\n # #Selling not enough products.\n # #--------------------------------------------------------------------\n # store.sell_product(amit_product_sale)\n #\n # #Another purchase\n # #--------------------------------------------------------------------\n # store.product_purchase_from_supplier(product_purchase)\n #\n # #Selling with the right amount.\n # #--------------------------------------------------------------------\n # store.sell_product(amit_product_sale)\n #\n # #Display drink in store.\n # #--------------------------------------------------------------------\n # store.display_drinks_in_store()\n #\n # #Display Purchase and Selling history in store.\n # #--------------------------------------------------------------------\n # store.display_product_purchases_from_supplier()\n #\n #\n # logger.info(\"Finshed running\")\n #\n # except Exception as e:\n # exc_traceback = sys.exc_info()[2] # Full traceback address\n #\n # print(\n # \"\\n=======================================================================================================================\")\n # logger.error(\"Exception 'e': {0} -- Error started in line: {1}\".format(e, exc_traceback.tb_lineno))\n # print(\n # \"=======================================================================================================================\\n\")\n #\n # print(\n # \"=======================================================================================================================\")\n # full_traceback = traceback.format_tb(exc_traceback)\n # logger.error(\"main -> Exception: printing full traceback stack\")\n # # full_traceback.reverse()\n # for error_trace in full_traceback:\n # # trace_arrange = error_trace.split(',')\n # trace_arrange = re.split(', |\\n', error_trace)\n # logger.error(\"Method: {0} -- {1} -- Error: {2} -- {3}\".format(trace_arrange[2], trace_arrange[1],\n # trace_arrange[3].strip(), trace_arrange[0]))\n #\n # print(\n # \"=======================================================================================================================\\n\")\n # logger.info(r'Log file save in: {0}\\{1}.log'.format(get_working_dir(), get_project()))\n #\n # finally:\n # release_logger()", "def test_add_stock_item(self):\n pass", "def test_can_not_access_checkout_with_empty_cart(self):\n responses = [self.client.post(self.CHECKOUT_URL, follow=True),\n self.client.get(self.CHECKOUT_URL, follow=True)]\n\n for response in responses:\n self.assertRedirects(response, reverse('orders:shopping_cart'))\n message = list(response.context.get('messages'))[0]\n self.assertEqual(message.tags, 'error')\n self.assertTrue(\"Your cart is empty.\" in message.message)", "def handle_item_consumption(self):\n self.tooltip_focus = None\n self.active_item_index = None\n self.refresh_inventory()\n self.refresh_equipment()", "def cart(request):\n \"\"\"crt = Cart(request)\n for item in crt:\n if item['quantity'] > 1:\n item['quantity'] = 1\"\"\"\n return {'cart': Cart(request)}", "def hook_buy_this_card(self, game, player):\n totrash = [c for c in player.piles[Piles.PLAYED] if c.isTreasure()]\n for c in totrash:\n player.output(f\"Mint trashing {c.name}\")\n player.trash_card(c)", "def test_classical_as_conductor(self):\n self.add_mp3(artist='Artist 1', title='Title 1',\n group='Group 1', conductor='Conductor 1', composer='Composer 1',\n album='Album 1', filename='song1.mp3', path='album_1')\n self.add_mp3(artist='Artist 1', title='Title 2',\n 
group='Group 2', conductor='Conductor 2', composer='Composer 2',\n album='Album 1', filename='song2.mp3', path='album_1')\n self.run_add()\n\n self.assertEqual(Artist.objects.count(), 8)\n artist = Artist.objects.get(name='Conductor 2')\n\n self.assertEqual(Album.objects.count(), 1)\n album = Album.objects.get()\n\n self.assertEqual(Song.objects.count(), 2)\n song_1 = Song.objects.get(title='Title 1')\n song_2 = Song.objects.get(title='Title 2')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(album)])\n self.assertContains(response, '1 album')\n self.assertQuerysetEqual(response.context['songs'].data, [repr(song_2)])\n self.assertContains(response, '1 song')\n self.assertContains(response, str(album))\n self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n \n for artist in Artist.objects.exclude(name='Various'):\n self.assertContains(response, str(artist))\n self.assertContains(response, reverse('exordium:artist', args=(artist.normname,)))\n\n self.assertNotContains(response, str(song_1))\n self.assertNotContains(response, song_1.get_download_url_html5())\n self.assertNotContains(response, song_1.get_download_url_m3u())\n self.assertContains(response, str(song_2))\n self.assertContains(response, song_2.get_download_url_html5())\n self.assertContains(response, song_2.get_download_url_m3u())", "def get_similar_products(list):\n #initialize cart with random ASIN\n params = {\"Item.1.ASIN\":'B000DLB2FI', 'Item.1.Quantity':1}\n cart = amazon.CartCreate(**params)\n root = objectify.fromstring(cart)\n cartid = _safe_get_element_text('Cart.CartId', root)\n hmac = _safe_get_element_text('Cart.HMAC', root)\n\n #create empty list of similar products\n sblist = []\n \n count = 0 #testing\n\n #iterate through list of original ASINs and retrieve also bought products\n print 'Retrieving \\\"Also Bought\\\" Products!' #testing\n for item in list:\n #add to cart\n amazon.CartClear(CartId=cartid, HMAC=hmac)\n params = {\"Item.1.ASIN\":item, 'Item.1.Quantity':1, 'CartId':cartid, 'HMAC':hmac, 'ResponseGroup':'Cart,CartSimilarities'}\n cart = amazon.CartAdd(**params)\n root = objectify.fromstring(cart)\n \n count +=1 #testing\n print count #testing\n \n #iterate through each similar product and add to list\n if \"SimilarProduct\" in cart:# HOW TO ACCOUNT FOR NO SIMILAR PRODUCTS\n for item2 in root.Cart.SimilarProducts.SimilarProduct:\n if _safe_get_element_text('Title', item2) is not None:\n sblist.append({'Original ASIN' : item,\n 'Associated ASIN' : item2.ASIN,\n 'Title' : item2.Title,\n 'Price' : None,\n 'Currency Code' : None,\n 'Relationship' : \"Also Bought\"})\n\n print 'Total # of \\\"Also Bought\\\" Products: ' + str(len(sblist)) #for testing\n count = 0 #testing\n \n #iterate through each similar prodcut and obtain highest price\n print 'Retrieving prices!' 
#testing\n for item in sblist:\n if item['Title'] is not None:\n title = filter(lambda x: x in string.printable, item['Title'].text) #remove non-ascii\n item['Title'] = title\n \n count+=1 #testing\n print count #testing\n\n pricelist = amazon.ItemLookup(ItemId=item['Associated ASIN'],ResponseGroup=\"OfferSummary,VariationSummary\")\n priceroot = objectify.fromstring(pricelist)\n #conditionals to check if parent or child ASIN or OOS\n if _safe_get_element_text(\"Items.Item.VariationSummary.HighestPrice.FormattedPrice\", priceroot) is not None: #Parent ASIN\n item['Price'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.FormattedPrice', priceroot)\n item['Currency Code'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.CurrencyCode', priceroot)\n elif _safe_get_element_text(\"Items.Item.OfferSummary.LowestNewPrice.FormattedPrice\", priceroot) is not None: #Child ASIN\n #save price and currency in case no other sellers\n price = _safe_get_element_text(\"Items.Item.OfferSummary.LowestNewPrice.FormattedPrice\", priceroot)\n currencycode = _safe_get_element_text(\"Items.Item.OfferSummary.LowestNewPrice.CurrencyCode\", priceroot)\n amazon.CartClear(CartId=cartid, HMAC=hmac)\n params = {\"Item.1.ASIN\":item['Associated ASIN'], 'Item.1.Quantity':1, 'CartId':cartid, 'HMAC':hmac}\n cart = amazon.CartAdd(**params)\n rootcart = objectify.fromstring(cart)\n parentASIN = _safe_get_element_text(\"Cart.ParentASIN\",rootcart) #get Parent ASIN\n parentproduct = amazon.ItemLookup(ItemId=parentASIN, ResponseGroup=\"OfferSummary,VariationSummary\")\n rootparent = objectify.fromstring(parentproduct)\n #No way to obtain highest price without through VariationSummary\n if _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.FormattedPrice', rootparent) is not None:\n item['Price'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.FormattedPrice', rootparent)\n item['Currency Code'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.CurrencyCode', rootparent)\n else:\n item['Price'] = price\n item['Currency Code'] = currencycode\n\n return sblist", "def test_classical_as_conductor_various(self):\n self.add_mp3(artist='Artist 1', title='Title 1',\n group='Group 1', conductor='Conductor 1', composer='Composer 1',\n album='Album 1', filename='song1.mp3', path='album_1')\n self.add_mp3(artist='Artist 2', title='Title 2',\n group='Group 2', conductor='Conductor 2', composer='Composer 2',\n album='Album 1', filename='song2.mp3', path='album_1')\n self.run_add()\n\n self.assertEqual(Artist.objects.count(), 9)\n artist = Artist.objects.get(name='Conductor 2')\n\n self.assertEqual(Album.objects.count(), 1)\n album = Album.objects.get()\n\n self.assertEqual(Song.objects.count(), 2)\n song_1 = Song.objects.get(title='Title 1')\n song_2 = Song.objects.get(title='Title 2')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(album)])\n self.assertContains(response, '1 album')\n self.assertQuerysetEqual(response.context['songs'].data, [repr(song_2)])\n self.assertContains(response, '1 song')\n self.assertContains(response, str(album))\n self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n \n for artist in Artist.objects.exclude(name='Artist 1'):\n self.assertContains(response, str(artist))\n self.assertContains(response, reverse('exordium:artist', args=(artist.normname,)))\n for 
artist in Artist.objects.filter(name='Artist 1'):\n self.assertNotContains(response, str(artist))\n self.assertNotContains(response, reverse('exordium:artist', args=(artist.normname,)))\n\n self.assertNotContains(response, str(song_1))\n self.assertNotContains(response, song_1.get_download_url_html5())\n self.assertNotContains(response, song_1.get_download_url_m3u())\n self.assertContains(response, str(song_2))\n self.assertContains(response, song_2.get_download_url_html5())\n self.assertContains(response, song_2.get_download_url_m3u())", "def test_cook_set_free(cook_busy, product_for_cook):\n cook_busy.set_free(True)\n # if product needs to be cooked\n assert product_for_cook.get_need_cook_status() is True\n cook_busy.cook_dish(product_for_cook)\n assert product_for_cook.get_need_cook_status() is False", "def test_add_all(self): #SAUCE-LAB-7\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('\\n')\n print('Not all items were added')", "def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? 
raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE", "def ingredient_used(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. Enjoy!\")", "def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. 
Enjoy!\")", "def test_cancel_shipment(self):\n pass", "def __init__(self):\n self._state: CartState = None", "def test_shopping_cart_is_empty(self):\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your shopping cart is empty.\")\n self.assertQuerysetEqual(response.context['contents'], [])", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "async def update(self, *args, **kwargs):\n if not self.__bought:\n random_stock = 1\n stock_price = self.priceindicator[random_stock].price\n if stock_price != 0:\n random_const = float(decimal.Decimal(random.randrange(-5,5))/100)\n stock_price = stock_price + stock_price*random_const\n stock_price = int(stock_price)\n await self.place_buy_order(random_stock, self.settings[\"stocks_per_company\"], stock_price, 1)\n log_message = \"StockBuyerBot(\" + self.name + \") bought \" + str(random_stock)\n print(log_message)\n else:\n log_message = \"StockBuyerBot(\" + self.name + \") bought nothing\"\n print(log_message)\n self.add_to_log(self.id, log_message)", "def test_free_product(self):\n product = self.create_product(price=D('0.00'))\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n self.assertEqual(cs_data['amount'], '0.00')\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)", "def update(self, context):\n session = Session()\n\n for sub in self.cart:\n offers = session.search(sub.query, self.lat, self.lon, self.radius)\n for offer in sub.handle_offers(offers):\n context.bot.send_message(self.chat_id, text=offer_text(offer))\n\n updates = sub.check_offers()\n for offer in updates['expired']:\n context.bot.send_message(self.chat_id,\n text=offer_text_expired(offer))\n for offer in updates['expiring']:\n context.bot_send_message(self.chat_id,\n text=offer_text_expiring(offer))\n self.config_updated()", "def is_out_of_stock(self) -> bool:\n return self.on_hand == 0", "def next(self):\r\n if self.position.size == 0:\r\n # The condition for activating BUY function --> By checking oversold condition.\r\n if self.rsi_2 < 30 and self.rsi_3 < 40:\r\n self.buyAlert = True\r\n # If BUY is activated and below conditions are met, then aa buy order would be placed.\r\n if self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and self.buyAlert:\r\n size = round((self.broker.getcash() / self.data), 3)\r\n self.order = self.buy(size=size)\r\n self.buyAlert = False\r\n print(round(self.broker.get_cash(), 1))\r\n # print(self.datas[0].low[0])\r\n\r\n if self.position.size != 0:\r\n # The condition for activating SELL_1 function --> Waiting for RSI to reach overbought zone.\r\n if self.rsi_4 > 67:\r\n self.sellAlert1 = True\r\n # If SELL_1 is activated and below conditions are met, then a sell order would be placed.\r\n if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:\r\n self.close()\r\n self.successNum += 1\r\n self.sellAlert1 = False\r\n\r\n # The condition for activating SELL_2 function --> Activated at overbought condition with RSI>85\r\n if self.rsi_4 > 85:\r\n self.sellAlert2 = True\r\n # If SELL_2 is activated and below conditions are met, then a sell order would be placed.\r\n if 
(self.rsi_4 < 80) and self.sellAlert2:\r\n self.close()\r\n self.successNum += 1\r\n self.sellAlert1 = False\r\n self.sellAlert2 = False\r\n\r\n # Setting Stop Loss for wrongly opened position.\r\n if 0.82 * self.order.executed.price > self.datas[0].close > 0.8 * self.order.executed.price:\r\n self.close()\r\n self.failureNum += 1\r\n print('Shit !!! Failed for {} times.'.format(self.failureNum))", "def test_6(self):\n toothpaste = Store.Product(11, \"toothpaste\", \"dental\", 2, 4)\n milk = Store.Product(12, \"milk\", \"dairy\", 2, 3)\n eggs = Store.Product(14, \"eggs\", \"dairy\", 2, 2)\n apple_juice = Store.Product(13, \"apple juice\", \"drink\", 1, 1)\n\n s = Store.Store()\n s.add_product(toothpaste)\n s.add_product(milk)\n s.add_product(eggs)\n s.add_product(apple_juice)\n\n henry = Store.Customer(\"henry\", \"mrh\", False)\n s.add_member(henry)\n\n s.add_product_to_member_cart(11, \"mrh\")\n s.add_product_to_member_cart(12, \"mrh\")\n s.add_product_to_member_cart(14, \"mrh\")\n self.assertAlmostEqual(s.check_out_member(\"mrh\"), 6.42, \"not the correct checkout amount\")", "def GetCart(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def is_buy(self):\n return(copysign(1, self.volume)>0)", "def calculate_signals(self, event):\n if event.type == 'MARKET':\n for s in self.symbol_list:\n highs = self.bars.get_latest_bars_values(\n s, \"high\", N=self.long_window\n )\n lows = self.bars.get_latest_bars_values(\n s, \"low\", N=self.long_window\n )\n close = self.bars.get_latest_bar_value(s, 'close')\n bar_date = self.bars.get_latest_bar_datetime(s)\n bar_date = datetime.strptime(bar_date[:-4], \"%Y-%m-%dT%H:%M:%S.%f\")\n if highs is not None and len(highs) == self.long_window and \\\n lows is not None and len(lows) == self.long_window:\n\n # close all orders before the end of weekend, Friday 17:00 in this case\n # uncomment this chunk of code if not\n # if bar_date.weekday() == 4 and bar_date.hour is 17:\n # action = ActionEvent(s, 'CLOSE_ALL')\n # self.events.put(action)\n # return\n R_max = np.max(highs[-self.short_window:])\n R_min = np.min(lows[-self.short_window:])\n R = (R_max - R_min) * 10000\n R = round(R, 1)\n\n R2_max = np.max(highs[-self.long_window:])\n R2_min = np.min(lows[-self.long_window:])\n R2 = (R2_max - R2_min) * 10000\n R2 = round(R2, 1)\n\n real_date = bar_date+timedelta(hours=4)\n # print('<----- K 线时间 {} -----> (当前实际时间是 {} 的第一秒)'.format(bar_date, real_date))\n # print('过去 {} 个小时, 最高价是 {}, 最低价是 {}. 波动值 R2 是 {} 个 Pips.'.format( 4*self.long_window, R2_max, R2_min, R2))\n if R2 < self.c1 or R2 > self.c2:\n # print('当前 R2 波动值不满足限制条件: {} < R2 < {}'.format(self.c1, self.c2))\n # print('不交易,略过。\\n\\n')\n return\n\n # print('当前 R2 波动值满足限制条件: {} < R2 < {} \\n'.format(self.c1, self.c2))\n # print('过去 {} 个小时, 最高价是 {}, 最低价是 {}. 波动值 R 是 {} 个 Pips.'.format( 4*self.short_window, R_max, R_min, R))\n\n buy_under = round(self.k1 * R, 1)\n limit_price = round(close - buy_under/10000, 5)\n # print('当前价格是 {}. 
{} 倍的 R 是 {} 个 pips '.format(close,self.k1, buy_under))\n # print('开一个限价的买单 (Limit Buy Order) 在当前价格 {} 的 {} 个 pips 之下,即 {}.'.format(close, buy_under, limit_price))\n\n profit_target = round(self.k2 * R, 1)\n # print('目标盈利 ( profit_target ) 是 {} 倍的 R,即 {} 个 pips.'.format(self.k2, profit_target))\n profit_target = round(limit_price + profit_target / 10000, 5)\n # print('即, {}'.format(profit_target))\n # print('止损 (stop_loss) 为固定的 {} 个 pips.'.format(self.sl))\n stop_loss = round(limit_price - self.sl / 10000, 5)\n # print('即, {}'.format(stop_loss))\n signal_type = 'LONG'\n signal = SignalEvent(s, real_date, signal_type, 'LMT',\n limit_price, stop_loss, profit_target)\n self.events.put(signal)" ]
[ "0.615932", "0.57919484", "0.5666151", "0.5642451", "0.56404835", "0.55797815", "0.55783755", "0.5517197", "0.55146784", "0.54833806", "0.5479677", "0.5449196", "0.54028076", "0.5392104", "0.5381052", "0.53170824", "0.52840286", "0.52400047", "0.5232583", "0.5214879", "0.52071244", "0.5181847", "0.5168226", "0.51623434", "0.51592785", "0.5142132", "0.5128647", "0.51267433", "0.5119796", "0.5116172", "0.50876284", "0.50744957", "0.5061141", "0.5037112", "0.503548", "0.5031223", "0.5028667", "0.5024628", "0.5020898", "0.5007211", "0.49986914", "0.4998437", "0.49953857", "0.4987651", "0.4987651", "0.4982109", "0.49813145", "0.49761087", "0.49745387", "0.49695718", "0.4953883", "0.49402753", "0.49264944", "0.49248073", "0.4915938", "0.4906754", "0.48993754", "0.48902234", "0.48899645", "0.4885488", "0.48848987", "0.48830608", "0.48785338", "0.487064", "0.4869007", "0.48609835", "0.48562664", "0.48481417", "0.4841158", "0.48395136", "0.48388767", "0.48358122", "0.48315302", "0.48281914", "0.48207927", "0.48116645", "0.4800284", "0.4797833", "0.47892362", "0.47805265", "0.47714406", "0.4770407", "0.47624367", "0.476172", "0.4759646", "0.47569507", "0.47553572", "0.47526485", "0.47520918", "0.47517872", "0.47481897", "0.47346056", "0.4734354", "0.4733509", "0.47329834", "0.47263604", "0.47262317", "0.47239143", "0.4723056", "0.47220653", "0.47216707" ]
0.0
-1
Coobserving carts should not bother with the apogee shutter when the gang connector is not at the cart.
def test_do_boss_calibs_one_flat_coobserve_gangPodium(self):
    cmdState = CmdState.DoBossCalibsCmd()
    cmdState.nFlat = 1
    sopTester.updateModel('guider', TestHelper.guiderState['apogeemangaDitherLoaded'])
    sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])
    sopTester.updateModel('apogee', TestHelper.apogeeState['B_open'])
    self._do_boss_calibs(7, 31, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basicNoSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Don't add the item\n pos.click(\"Ok\")\n \n # Confirm we aren't in a transaction\n if self.in_transaction():\n self.tc_fail(\"Unintentionally In Transaction\")\n else:\n self.log.info(\"Confirmed we are not in a transaction\")\n \n # Setup for next test\n self.recover()", "def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_show_cart_empty(client):\n raise NotImplemented('Acceptance test failed')", "def test_cart_flushed(self):\n self.fill_session_cart()\n\n session = self.client.session\n self.assertNotEqual(session['cart'], {})\n self.assertNotEqual(session['cart_cost'], 0)\n\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n\n session = self.client.session\n self.assertEqual(session['cart'], {})\n self.assertEqual(session['cart_cost'], 0)", "def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_goto_field_apogee_bypass_gangToCart(self):\n self._prep_bypass('gangToCart', clear=True)\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self._goto_feld_apogee(13, 44, 4, 0, cmdState)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_shopping_cart_not_empty(self):\n expected_contents = self.fill_session_cart()\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.context['contents'], expected_contents)", "def check_cart(cart):\n return 0 <= cart[0] < grid_size and 0 <= cart[1] < grid_size and 0 <= cart[2] < grid_size", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def cart_excluded(self,cart):\n\t\tfor ex_cart in self.excludes['cart_exclude']:\n\t\t\tif cart == ex_cart:\n\t\t\t\tprint \" \u001b[43mExcluding:\u001b[m %s (File list will be pulled from the database)\" % (cart)\n\t\t\t\treturn True\n\t\treturn False", "def is_starving(self):\n return self.sugar < 0", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def _should_bypass_reservation(self):\n should_bypass_reservation = super(StockMove, self)._should_bypass_reservation()\n if not should_bypass_reservation and self.is_subcontract:\n return 
True\n return should_bypass_reservation", "def is_out_of_stock(self) -> bool:\n return self.on_hand == 0", "def isGasBoiler(self):\n if self.getTER1() == 0 and self.getTER2() == 0:\n return 1 #gas boiler\n else:\n return 0", "def discount(self, cart):", "def bogof_discount(self):\n bogof_discount = 0\n for item in self.cart.items:\n if item.quantity > 1:\n bogof_discount += (math.floor(item.quantity / 2) * item.product.price)\n\n self.cart._total -= bogof_discount", "def is_market(self):\n return(not self.is_pending)", "def is_buy(self):\n return(copysign(1, self.volume)>0)", "def test_gain(self):\n self.plr.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"Get Estate\"]\n self.plr.gain_card(\"Cursed Village\")\n self.assertNotIn(\"Curse\", self.plr.piles[Piles.DISCARD])\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Estate\"])\n self.assertIn(\"Duchy\", self.g.trashpile)", "def test_can_check_if_cart_has_item_needing_delivery(self):\n request = HttpRequest()\n engine = import_module(settings.SESSION_ENGINE)\n session_key = None\n request.session = engine.SessionStore(session_key)\n\n # below two lines of code from https://stackoverflow.com/questions/11938164/why-dont-my-django-unittests-know-that-messagemiddleware-is-installed\n # fixes bug where test fails because unittest\n messages = FallbackStorage(request)\n setattr(request, '_messages', messages)\n\n random_non_delivery_item_1 = choice(StoreItem.objects.filter(delivery_required=False))\n random_non_delivery_item_2 = choice(\n StoreItem.objects.filter(delivery_required=False).exclude(id=random_non_delivery_item_1.id))\n random_delivery_item = choice(StoreItem.objects.filter(delivery_required=True))\n\n cart_add(request, random_non_delivery_item_1.id)\n self.assertFalse(cart_contains_item_needing_delivery(request))\n cart_add(request, random_non_delivery_item_2.id)\n self.assertFalse(cart_contains_item_needing_delivery(request))\n cart_add(request, random_delivery_item.id)\n self.assertTrue(cart_contains_item_needing_delivery(request))", "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def test_login_with_nonempty_cart(client):\n raise NotImplemented('Acceptance test failed')", "def test_initial_risk_position_sizer_without_cap(self):\n fraction_at_risk = 0.23\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n target_quantity = float(np.floor(portfolio_value * self.initial_risk / fraction_at_risk))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.ticker, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.ticker, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_can_not_access_checkout_with_empty_cart(self):\n responses = [self.client.post(self.CHECKOUT_URL, follow=True),\n self.client.get(self.CHECKOUT_URL, follow=True)]\n\n for 
response in responses:\n self.assertRedirects(response, reverse('orders:shopping_cart'))\n message = list(response.context.get('messages'))[0]\n self.assertEqual(message.tags, 'error')\n self.assertTrue(\"Your cart is empty.\" in message.message)", "def test_add_with_not_right_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"-14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_initial_risk_position_sizer_without_cap(self):\n fraction_at_risk = 0.23\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk)\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n target_quantity = int(np.floor(portfolio_value * self.initial_risk / fraction_at_risk))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.contract, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.contract, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_inTransSalePC(self):\n # Start a transasction\n pos.click_speed_key(\"Generic Item\")\n \n # Void the item to an empty transaction\n # NOTE: Should uncomment this when related defect is fixed (likely in MERLIN-1335)\n #pos.click(\"Void item\")\n \n # Repeat earlier test\n self.test_basicSalePC()", "def compare_price(self):\n if self.__product_price < self.__alert_price:\n #print(\"price drop...\")\n self.__alert_client = True\n self.__price_difference = self.__product_price - self.__alert_price\n else:\n #print(\"Price not reduced...\")\n self.__alert_client = False\n self.__price_difference = self.__product_price - self.__alert_price", "def test_shoppingcart_must_not_update_if_closed(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the closed shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n data[\"is_closed\"] = True\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then check for fail in update shoppingcart\n self.url = reverse(\"update-shoppingcart\")\n data[\"id\"] = id_cart\n response = self.client.post(self.url, data, **self.auth_headers)\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def is_clayey(self):\n group_index = self._data[SoilProperty.GI]\n return group_index[0] not in ['S','G']", "def _should_bypass_reservation(self, location):\n should_bypass_reservation = super(StockMoveLine, self)._should_bypass_reservation(location)\n if not should_bypass_reservation and self.move_id.is_subcontract:\n return True\n return should_bypass_reservation", "def test_show_cart_with_items(client):\n raise NotImplemented('Acceptance test failed')", "def test_buyTicket_NotForSale():\n old_venue_balance = testVenue.wallet\n assert not testUser2.buyTicket(testTicket2)\n assert 
testTicket2 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500\n assert testVenue.wallet == old_venue_balance", "def needRestock(self):\n # TODO check if the quantity<threshold and return true if it is\n # we'll set for now the threshold at *five* items\n # so we need to check if self.quantity is less than five.\n\n threshold = 5\n if self.quantity < threshold or PerishableStockItem.pastSellByDate(self):\n return True\n else:\n return False", "def is_buy(order):\n return(copysign(1, order.amount)>=0)", "def hook_gain_this_card(self, game, player):\n empties = sum(1 for st in game.cardpiles if game[st].is_empty())\n for _ in range(empties):\n player.gain_card(\"Gold\")", "def hook_buy_this_card(self, game, player):\n totrash = [c for c in player.piles[Piles.PLAYED] if c.isTreasure()]\n for c in totrash:\n player.output(f\"Mint trashing {c.name}\")\n player.trash_card(c)", "def is_bear_market(self):\n return self.port_return(self.data.last('2M')) <= -.2", "def skip(self):\n if self._energy < self._be_cost:\n return\n\n self._energy = self._energy - self._be_cost\n self._env.simulate()", "def check_dead(cart):\n id = cart_to_loc(cart)\n return voxel_data[id] == 0", "def test_no_support_different_sectors(self):\n battle = self.battle\n self.carol.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.carol, 2, hinder=False)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def test_free_product(self):\n product = self.create_product(price=D('0.00'))\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n self.assertEqual(cs_data['amount'], '0.00')\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)", "def test_initial_risk_position_sizer_with_cap(self):\n fraction_at_risk = 0.01 # will give leverage of 2, that will be capped to 1.5\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n max_leverage = self.initial_risk_position_sizer.max_target_percentage\n target_quantity = float(np.floor(portfolio_value * max_leverage))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.ticker, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.ticker, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_keep_cart_on_login(self):\n request = self.factory.post('/shop/auth/login', follow=True)\n request.customer = Customer()\n request.customer.save()\n request.session = {'session_key': 'keep_cart'}\n request.user = self.bart\n old_cart = Cart.objects.get_from_request(request)\n user_logged_in.send(sender=self.bart.__class__, request=request, user=self.bart)\n new_cart = 
Cart.objects.get_from_request(request)\n self.assertEqual(new_cart.customer, request.customer)\n self.assertEqual(new_cart, old_cart)", "def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500", "def test_initial_risk_position_sizer_with_cap(self):\n fraction_at_risk = 0.01 # will give leverage of 2, that will be capped to 1.5\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk)\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n max_leverage = self.initial_risk_position_sizer.max_target_percentage\n target_quantity = int(np.floor(portfolio_value * max_leverage))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.contract, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.contract, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_no_enable_paid_course_registration(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def is_spare(self):\n if self.is_strike():\n return False\n\n return (self.first_ball + self.second_ball) == 10", "def test_curse(self):\n self.plr.piles[Piles.DECK].set(\"Estate\")\n self.plr.gain_card(\"Cursed Village\")\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Curse\"])\n self.assertIn(\"Estate\", self.g.trashpile)", "def check_stock(self):\n if self.quantity > self.item.quantity:\n return \"%s Please adjust your cart.\" % CartItem.get_insufficient_stock_msg(self.item.quantity)\n return None", "def test_bonus_disc_no_bonus(self):\r\n gm_discs = set([1, 2])\r\n sp_discs = set([1, 2])\r\n self.assertFalse(gmspotify._bonus_disc_added(gm_discs, sp_discs))", "def test_play_nobane(self):\n self.victim.piles[Piles.HAND].set(\"Copper\", \"Silver\")\n self.attacker.piles[Piles.HAND].set(\n \"Copper\", \"Silver\", \"Gold\", \"Duchy\", \"Province\"\n )\n self.attacker.add_card(self.card, Piles.HAND)\n self.attacker.test_input = [\"Duchy\", \"Province\", \"finish\"]\n self.attacker.play_card(self.card)\n try:\n self.assertIn(self.g[self.g._bane].cost, (2, 3))\n self.assertEqual(self.attacker.piles[Piles.HAND].size(), 5 + 2 - 2)\n self.assertIn(\"Curse\", self.victim.piles[Piles.DISCARD])\n except AssertionError: # pragma: no cover\n print(f\"Bane={self.g._bane}\")\n self.g.print_state()\n raise", "def checkout_cart(self, cart):\n pass", "def test_flipkart_flow(self):\n try:\n self.google_page.enter_google_search_text(message.FLIPKART_TEXT)\n self.google_page.display_google_search_suggestions()\n self.google_page.press_enter_from_google_search_textbox()\n self.google_page.click_on_flipkart_link()\n \n self.flipkart_page.close_login_popup()\n self.flipkart_page.navigate_window_air_conditioners_page()\n self.flipkart_page.select_add_compare_checkbox(2)\n self.flipkart_page.select_add_compare_checkbox(3)\n self.flipkart_page.select_add_compare_checkbox(6)\n self.flipkart_page.click_on_add_compare_button()\n \n # print item details\n 
self.flipkart_page.print_item_details(1)\n self.flipkart_page.print_item_details(2)\n self.flipkart_page.print_item_details(3)\n\n # get compare item page url and display avalibility\n self.compare_page_url = self.driver.current_url\n self.flipkart_page.add_to_cart(1)\n self.driver.get(self.compare_page_url) \n self.flipkart_page.add_to_cart(2)\n self.driver.get(self.compare_page_url)\n self.flipkart_page.add_to_cart(3)\n self.flipkart_page.verify_avalibility_of_items_by_pincode(conf.PINCODE)\n\n print('------ Delivery details for {} pincode --------'.format(conf.PINCODE))\n self.flipkart_page.print_item_delivery_msg(1)\n self.flipkart_page.print_item_delivery_msg(2)\n self.flipkart_page.print_item_delivery_msg(3)\n\n self.flipkart_page.check_again_avalibility_of_items_by_pincode(conf.PINCODE2)\n\n print('------ Delivery details for {} pincode --------'.format(conf.PINCODE2))\n self.flipkart_page.print_item_delivery_msg(1)\n self.flipkart_page.print_item_delivery_msg(2)\n self.flipkart_page.print_item_delivery_msg(3) \n except Exception as msg:\n print(str(msg))", "def test_add_to_cart_item(self, app, pm):\n logging.basicConfig(filename='/home/osboxes/pytest_mobile/logs/test.log', level=logging.DEBUG, filemode=\"w\")\n app.browser.tap_button(pm.start_page.get_skip_button())\n app.browser.tap_button(pm.main_page.get_cart_button())\n result = app.browser.get_text(pm.my_cart_page.get_cart_empty_cart())\n assert result == \"Your Cart is Empty\"\n app.browser.tap_button(pm.menu_items.get_back_button())", "def test_shopping_cart_is_empty(self):\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your shopping cart is empty.\")\n self.assertQuerysetEqual(response.context['contents'], [])", "def pol_to_cart():\n pass", "def test_partial_deck_doesnt_have_ignored_cards(self):\n self.assertEqual(self.ignoredCardPresent, False)", "def stoploss(self):\n price = float(self.price)\n print(\"orderPrice1:\",self.dentry[\"orderPrice1\" + self.chartnumber])\n if (self.dentry[\"orderPrice1\" + self.chartnumber] - price) / self.dentry[\"orderPrice1\" + self.chartnumber] * 100 >= self.stopPercent:\n self.sell(stopped=True)", "def test_lta_good(self):\n self.assertIsNone(api.inventory.check(self.lta_order_good))", "def EmptyCart(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def check_trying_using(self):\r\n if self.opportunity or 'key' in inventory:\r\n if self.rect.colliderect(player):\r\n music_acceptor.usingPortalSound()\r\n player.rect.x = random.randrange(75, WIDTH - 125)\r\n player.rect.y = random.randrange(25, HEIGHT - 100)", "def is_ignored(self):", "def test_fifo_with_nones(self):\n # Leave quant1, quant 2 with `in_date: False`\n # Leave quant 2 with no package, set quant1 and quant2 packages.\n self.quant1.write({\"package_id\": self.pack1.id})\n self.quant3.write({\"package_id\": self.pack3.id, \"in_date\": datetime.now()})\n\n # Reserve quantity - one apple\n reserved_quants = self.Quant._update_reserved_quantity(\n self.apple, self.test_stock_location_01, 1\n )\n reserved_quant = reserved_quants[0][0]\n\n self.assertFalse(reserved_quant.in_date)\n self.assertFalse(reserved_quant.package_id)\n self.assertEqual(reserved_quant, self.quant2)", "def test_add_with_negative_price(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n 
check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def free_item_discount(self, items):\n new_items = []\n occurrences = collections.Counter(item.sku for item in items)\n if occurrences.get(MacbookPro.sku, 0) > 0:\n for item in items:\n if item.sku == VGAadapter.sku and item.price != 0:\n item.price = 0\n else:\n new_items.append(CheckoutItem(sku=VGAadapter.sku, price=0))\n items.extend(new_items)\n return items", "def test_lpdaac_good(self):\n self.assertIsNone(api.inventory.check(self.lpdaac_order_good))", "def noCheck():\n dislin.nochek()", "def sell():\n return apology(\"TODO\")", "def is_proprietary():\n return False", "def needRestock(self):\n #TODO check if the quantity<threshold and return true if it is\n #we'll set for now the threshold at *five* items\n #so we need to check if self.quantity is less than five.\n threshold = 5\n if self.quantity < threshold:\n return True\n else:\n return False", "def test_basicSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Add the item\n pos.click(\"Sell Item\")\n \n # Confirm we added the item\n ret = self.confirm_line(-1, \"Generic Item\", \"$0.01\")\n if ret == True:\n self.log.info(\"Confirmed item added\")\n else:\n self.tc_fail(ret)\n \n # Setup for next test\n self.recover()", "def ingredient_used_canceled(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used canceled initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.used.id)]\n , order=[('batch_number', 'DESC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n # pdb.set_trace()\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def can_afford_card(self,\n card: Card) -> bool:\n price_after_discount = card.price % self.discount()\n missing_gems = 0\n for gem_color in GemColor:\n if gem_color != GemColor.GOLD:\n missing_gems += max(price_after_discount.value(gem_color) - self.gems_possessed.value(gem_color),0)\n return self.gems_possessed.value(GemColor.GOLD) >= missing_gems", "def allBoatsSunk(self):\n for boat in self.boats:\n if not boat.isCaput():\n return False\n return True", "def climb(self):\n print(\"Inside 
WoodElf.climb\")", "def is_bull_market(self):\n return self.port_return(self.data.last('2M')) >= .2", "def test_cant_create_order_twice(self):\n\t\to2 = BuyInfluenceOrder(\n\t\t\tplayer=self.p\n\t\t)\n\n\t\tself.assertRaises(OrderNotAvailable, o2.clean)", "def test_do_apogee_science_500s_after_1000s_cart7(self):\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLead1000sCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 1000)\n self.assertEqual(cmdState.keywords['expTime'], 1000)\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLeadCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 500)\n self.assertEqual(cmdState.keywords['expTime'], 500)", "def filter_untracked(quant) -> bool:\n return quant.lot_id is None", "def set_equivalent_cart(self, base_cart):\n\t\t# Emptying cart\n\t\tself.empty()\n\n\t\t# Getting base cart content\n\t\tcontents = base_cart.cart_content_set.all()\n\t\tbase_osm = base_cart.osm\n\t\tequivalence_store = {}\n\n\t\tfor content in contents:\n\t\t\tbase_product = content.product\n\t\t\tquantity = content.quantity\n\n\t\t\t# First, looking for a match\n\t\t\tmatch = base_product.productmatch_set.all()\n\t\t\tif len(match)>0:\n\t\t\t\tmatch = match[0]\n\t\t\t\tmathed_product = getattr(match, self.cart.osm+'_product') # Evil hack!! Or is it? I love Python :D\n\t\t\t\tif mathed_product is not None:\n\t\t\t\t\tmatch_content = self.add_product(mathed_product, quantity, is_user_added = False, is_match = True, is_suggested = False)\n\t\t\t\t\tsetattr(match_content, content.cart.osm+'_content', content)\n\t\t\t\t\tsetattr(content, match_content.cart.osm+'_content', match_content)\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': match_content,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': True,\n\t\t\t\t\t\t'is_suggested': False\n\t\t\t\t\t}\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontent.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmatch_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\n\t\t\t\t\t# print '\\tMatch : '+mathed_product.url\n\t\t\t\telse:\n\t\t\t\t\t# Look for similarities\n\t\t\t\t\tsimilarities = self.get_similarites(base_product, base_osm)\n\t\t\t\t\tif(len(similarities)>0):\n\t\t\t\t\t\tsim_content = self.add_product(similarities[0][0], quantity, is_user_added = False, is_match = False, is_suggested = True)\n\t\t\t\t\t\tsetattr(sim_content, content.cart.osm+'_content', content)\n\t\t\t\t\t\tsetattr(content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tcontent.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\t\t\t\t\telse:\n\t\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\n\n\t\t\telse:\n\t\t\t\t# Look for similarities\n\t\t\t\tsimilarities = self.get_similarites(base_product, 
base_osm)\n\t\t\t\tif(len(similarities)>0):\n\t\t\t\t\tsim_content = self.add_product(similarities[0][0], quantity, is_user_added = False, is_match = False, is_suggested = True)\n\t\t\t\t\tsetattr(sim_content, content.cart.osm+'_content', content)\n\t\t\t\t\tsetattr(content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontent.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\t\t\t\telse:\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\n\n\t\treturn equivalence_store", "def test_do_apogee_science_1000s_after_500s_cart7(self):\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLeadCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 500)\n self.assertEqual(cmdState.keywords['expTime'], 500)\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLead1000sCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 1000)\n self.assertEqual(cmdState.keywords['expTime'], 1000)", "def test_gather_location_no_product(self):\n gathered_items = self.Quant._gather(self.apple, self.test_stock_location_02)\n # Check the number of apple quants returned is correct\n self.assertFalse(len(gathered_items))", "async def should_handle(self):\n return self.main.base_amount > 4 and self.main.can_build_unique(UnitTypeId.INFESTATIONPIT, self.main.pits)", "def test_redirect_with_empty_cart(self):\n response = self.client.get(\"/cart/\")\n self.assertEqual(response.status_code, 302)", "def should_grow_on_food_collision(self):\n return True", "def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)", "def ingredient_used(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n 
self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def cart(request):\n \"\"\"crt = Cart(request)\n for item in crt:\n if item['quantity'] > 1:\n item['quantity'] = 1\"\"\"\n return {'cart': Cart(request)}", "def test_unavailabe_items(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def buy_one_get_one(products):\n if 'p1' in products and products['p1'] >= 2:\n return -20\n else:\n return 0", "def test_shopping_cart_is_empty(self):\n self.login_browser_user()\n url = self.live_server_url + reverse('orders:shopping_cart')\n self.browser.get(url)\n\n self.assertEqual(\n self.browser.find_element_by_tag_name('p').text,\n \"Your shopping cart is empty.\"\n )", "def cointoss():\n return random.random() < 0.5", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False", "def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)", "def test_add_to_cart_item_at_minimum_stock(self):\n response = self.client.get(\n '/self.base_url/sales/2/1',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"The following product has reached the mimimum stock, please contact the admin for sales below minimum stock\")\n self.assertEqual(response.status_code,200)", "def test_default_product_stealability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")" ]
[ "0.613266", "0.6027911", "0.5957148", "0.58498114", "0.5780373", "0.57664615", "0.57352144", "0.5670634", "0.5603377", "0.5565635", "0.55271673", "0.5519921", "0.55182767", "0.55163413", "0.54893744", "0.54880553", "0.54810375", "0.54696536", "0.5443818", "0.543117", "0.53950846", "0.53874665", "0.53808635", "0.5374204", "0.53382814", "0.5329625", "0.5325854", "0.53212994", "0.52942413", "0.5283639", "0.52661276", "0.5256112", "0.52392685", "0.52292824", "0.52287287", "0.52280384", "0.52192014", "0.5218997", "0.52174103", "0.52153707", "0.5212322", "0.5209741", "0.51969075", "0.51906925", "0.51856154", "0.5185529", "0.5181521", "0.51677454", "0.5153048", "0.5148191", "0.5136067", "0.51338434", "0.5127383", "0.5120716", "0.5118449", "0.5115728", "0.5113614", "0.51133263", "0.5105191", "0.5104516", "0.50989294", "0.5095097", "0.508895", "0.5078001", "0.5074762", "0.50735694", "0.50728863", "0.506725", "0.50615376", "0.50610244", "0.5059562", "0.50540644", "0.50530255", "0.5050626", "0.5050063", "0.50453526", "0.5039665", "0.5039461", "0.50390255", "0.5036734", "0.5031057", "0.50265515", "0.5025822", "0.5019179", "0.5014276", "0.50102633", "0.5009124", "0.5008388", "0.49907377", "0.49897674", "0.4986597", "0.49726984", "0.49699536", "0.49664864", "0.4964904", "0.49634263", "0.49616882", "0.49616882", "0.49578816", "0.49538454", "0.49534658" ]
0.0
-1
coobserving carts should close the apogee shutter first.
def test_do_boss_calibs_one_arc_coobserve(self): cmdState = CmdState.DoBossCalibsCmd() cmdState.nArc = 1 sopTester.updateModel('guider', TestHelper.guiderState['apogeemangaDitherLoaded']) sopTester.updateModel('mcp', TestHelper.mcpState['apogee_parked']) sopTester.updateModel('apogee', TestHelper.apogeeState['B_open']) self._do_boss_calibs(8, 39, 0, 0, cmdState)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cart_flushed(self):\n self.fill_session_cart()\n\n session = self.client.session\n self.assertNotEqual(session['cart'], {})\n self.assertNotEqual(session['cart_cost'], 0)\n\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n\n session = self.client.session\n self.assertEqual(session['cart'], {})\n self.assertEqual(session['cart_cost'], 0)", "def test_shoppingcart_must_not_update_if_closed(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the closed shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n data[\"is_closed\"] = True\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then check for fail in update shoppingcart\n self.url = reverse(\"update-shoppingcart\")\n data[\"id\"] = id_cart\n response = self.client.post(self.url, data, **self.auth_headers)\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def discount(self, cart):", "def test_add_to_cart_item(self, app, pm):\n logging.basicConfig(filename='/home/osboxes/pytest_mobile/logs/test.log', level=logging.DEBUG, filemode=\"w\")\n app.browser.tap_button(pm.start_page.get_skip_button())\n app.browser.tap_button(pm.main_page.get_cart_button())\n result = app.browser.get_text(pm.my_cart_page.get_cart_empty_cart())\n assert result == \"Your Cart is Empty\"\n app.browser.tap_button(pm.menu_items.get_back_button())", "def checkout_cart(self, cart):\n pass", "def test_show_cart_empty(client):\n raise NotImplemented('Acceptance test failed')", "def test_do_apogee_science_500s_after_1000s_cart7(self):\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLead1000sCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 1000)\n self.assertEqual(cmdState.keywords['expTime'], 1000)\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLeadCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 500)\n self.assertEqual(cmdState.keywords['expTime'], 500)", "def test_do_apogee_science_1000s_after_500s_cart7(self):\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLeadCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 500)\n self.assertEqual(cmdState.keywords['expTime'], 500)\n\n sopTester.updateModel('platedb', TestHelper.platedbState['apogeeLead1000sCart7'])\n self._update_cart(7, 'APOGEE')\n cmdState = self.actorState.doApogeeScience\n cmdState.reinitialize(self.cmd)\n self.assertEqual(cmdState.expTime, 1000)\n self.assertEqual(cmdState.keywords['expTime'], 1000)", "def test_initial_risk_position_sizer_with_cap(self):\n fraction_at_risk = 0.01 # will give leverage of 2, that will be capped to 1.5\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n 
self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n max_leverage = self.initial_risk_position_sizer.max_target_percentage\n target_quantity = float(np.floor(portfolio_value * max_leverage))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.ticker, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.ticker, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def close_orders(self):", "async def stocks(self, ctx):\n\t\tpass", "def test_initial_risk_position_sizer_with_cap(self):\n fraction_at_risk = 0.01 # will give leverage of 2, that will be capped to 1.5\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk)\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n max_leverage = self.initial_risk_position_sizer.max_target_percentage\n target_quantity = int(np.floor(portfolio_value * max_leverage))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.contract, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.contract, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_show_cart_with_items(client):\n raise NotImplemented('Acceptance test failed')", "def api_confirm_cart():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tfor row in cur.execute('SELECT * FROM cart').fetchall():\r\n\t\t(id1,title,count,price) = row['id'], row['title'], row['inventory_count'],row['price']\r\n\t\tprint(row)\r\n\t\tcur.execute('UPDATE products SET inventory_count = inventory_count-1 WHERE id=? 
AND inventory_count>0',(id1,))\r\n\t\tprint(\"Reduced from \"+ str(count)+\"/n\")\r\n\tp= cur.execute('SELECT * FROM products').fetchall()\r\n\tcur.execute('DELETE FROM cart;')\r\n\tconn.commit()\r\n\treturn jsonify(p)", "def test_keep_cart_on_login(self):\n request = self.factory.post('/shop/auth/login', follow=True)\n request.customer = Customer()\n request.customer.save()\n request.session = {'session_key': 'keep_cart'}\n request.user = self.bart\n old_cart = Cart.objects.get_from_request(request)\n user_logged_in.send(sender=self.bart.__class__, request=request, user=self.bart)\n new_cart = Cart.objects.get_from_request(request)\n self.assertEqual(new_cart.customer, request.customer)\n self.assertEqual(new_cart, old_cart)", "def test_flipkart_flow(self):\n try:\n self.google_page.enter_google_search_text(message.FLIPKART_TEXT)\n self.google_page.display_google_search_suggestions()\n self.google_page.press_enter_from_google_search_textbox()\n self.google_page.click_on_flipkart_link()\n \n self.flipkart_page.close_login_popup()\n self.flipkart_page.navigate_window_air_conditioners_page()\n self.flipkart_page.select_add_compare_checkbox(2)\n self.flipkart_page.select_add_compare_checkbox(3)\n self.flipkart_page.select_add_compare_checkbox(6)\n self.flipkart_page.click_on_add_compare_button()\n \n # print item details\n self.flipkart_page.print_item_details(1)\n self.flipkart_page.print_item_details(2)\n self.flipkart_page.print_item_details(3)\n\n # get compare item page url and display avalibility\n self.compare_page_url = self.driver.current_url\n self.flipkart_page.add_to_cart(1)\n self.driver.get(self.compare_page_url) \n self.flipkart_page.add_to_cart(2)\n self.driver.get(self.compare_page_url)\n self.flipkart_page.add_to_cart(3)\n self.flipkart_page.verify_avalibility_of_items_by_pincode(conf.PINCODE)\n\n print('------ Delivery details for {} pincode --------'.format(conf.PINCODE))\n self.flipkart_page.print_item_delivery_msg(1)\n self.flipkart_page.print_item_delivery_msg(2)\n self.flipkart_page.print_item_delivery_msg(3)\n\n self.flipkart_page.check_again_avalibility_of_items_by_pincode(conf.PINCODE2)\n\n print('------ Delivery details for {} pincode --------'.format(conf.PINCODE2))\n self.flipkart_page.print_item_delivery_msg(1)\n self.flipkart_page.print_item_delivery_msg(2)\n self.flipkart_page.print_item_delivery_msg(3) \n except Exception as msg:\n print(str(msg))", "def test_update_shopping_cart(self):\n food_cost = self.browser.find_element_by_id('food-cost')\n old_food_cost = int(food_cost.text)\n\n items = self.get_list_of_items()\n index = randint(1, len(items) - 1)\n list_item = self.get_item_dict(items[index])\n item_price = self.expected_contents[index]['price']\n old_cost = self.expected_contents[index]['cost']\n\n increase_by = randint(5, 10)\n directions = [\n {\n 'action': 'increase',\n 'range': range(1, increase_by + 1)\n },\n {\n 'action': 'decrease',\n 'range': range(increase_by - 1, - 1, -1)\n }\n ]\n for direction in directions:\n for i in direction['range']:\n list_item[direction['action']].click()\n sleep(0.1)\n new_cost = int(list_item['cost'].text)\n new_food_cost = int(food_cost.text)\n self.assertTrue(new_food_cost - old_food_cost ==\n new_cost - old_cost == item_price * i)", "def test_add_and_remove_two_items(self):\n login = LoginPage(self.driver) #SAUCE-LAB-5\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products[0]\n first_item: InventoryItem\n first_item.add_to_cart()\n print('\\n')\n 
print(first_item.get_title())\n print(first_item.get_description())\n print(first_item.get_price())\n print('*' * 80)\n second_item = inventory_page.products[4]\n second_item: InventoryItem\n second_item.add_to_cart()\n print('\\n')\n print(second_item.get_title())\n print(second_item.get_description())\n print(second_item.get_price())\n print('*' * 80)\n first_item.remove_from_cart()\n second_item.remove_from_cart()\n print(f'Products {first_item.get_title()} and {second_item.get_title()} were successfully removed')", "def test_initial_risk_position_sizer_without_cap(self):\n fraction_at_risk = 0.23\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n target_quantity = float(np.floor(portfolio_value * self.initial_risk / fraction_at_risk))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.ticker, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.ticker, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def outofstock_pop(self):\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'button#close-button')))\n popup=self.driver.find_element_by_css_selector('button#close-button')\n action = TouchActions(self.driver)\n action.tap(popup).perform()", "def test_basicNoSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Don't add the item\n pos.click(\"Ok\")\n \n # Confirm we aren't in a transaction\n if self.in_transaction():\n self.tc_fail(\"Unintentionally In Transaction\")\n else:\n self.log.info(\"Confirmed we are not in a transaction\")\n \n # Setup for next test\n self.recover()", "def test_shopping_cart_not_empty(self):\n expected_contents = self.fill_session_cart()\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.context['contents'], expected_contents)", "def cart_contents(request):\n cart_items = []\n total = 0\n savings = 0\n product_count = 0\n points_available = 0\n points_earned = 0\n discount_applied = request.session.get('discount_applied')\n cart = request.session.get('cart', {})\n\n # Create a new dict so that items can be removed if needed\n new_dict = {k: v for k, v in cart.items()}\n\n for item, quantity in new_dict.items():\n # Use string created in cart view to isolate model ids\n product_id = item.split(\"_\")[0]\n size_id = item.split(\"_\")[1]\n nic_id = item.split(\"_\")[2]\n\n # Retrieve relevant objects for templating and remove if\n # no longer in database\n try:\n product = Product.objects.get(pk=product_id)\n except Product.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item was removed from your cart as it is \\\n no longer available. Try to find a worthy replacement!')\n continue\n # Repeat for Size\n try:\n size = Size.objects.get(pk=size_id)\n except Size.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item could not be added as its \\\n size is no longer available. 
\\\n Try to find a worthy replacement!')\n continue\n # Repeat for Nicotine\n try:\n nic = Nicotine.objects.get(pk=nic_id)\n except Nicotine.DoesNotExist:\n del cart[item]\n messages.error(request, 'An item could not be added as its \\\n nicotine options have changed. \\\n Try to find a worthy replacement!')\n continue\n\n # Check sale status and retrieve relevant price from Size model\n if product.on_sale:\n price = size.sale_price\n savings += (size.price - size.sale_price) * quantity\n else:\n price = size.price\n total += quantity * price\n product_count += quantity\n cart_items.append({\n 'item_id': item,\n 'product': product,\n 'size': size,\n 'nic': nic,\n 'price': price,\n 'quantity': quantity,\n })\n\n original_total = total\n request.session['cart'] = cart\n\n # Get user profile\n if request.user.is_authenticated:\n profile = get_object_or_404(UserProfile, user_id=request.user)\n\n else:\n profile = None\n\n # Check for available points\n if profile:\n points_available = profile.points\n\n # Check if user has chosen to redeem points and that the discount\n # will never take the total below zero\n if discount_applied:\n if total - Decimal(points_available / 100) <= 0:\n total = 0\n\n else:\n total -= Decimal(points_available / 100)\n\n if total < settings.FREE_DELIVERY_THRESHOLD:\n delivery = Decimal(settings.STANDARD_DELIVERY)\n free_delivery_delta = settings.FREE_DELIVERY_THRESHOLD - total\n\n else:\n delivery = 0\n free_delivery_delta = 0\n\n grand_total = delivery + total\n points_earned = int(math.floor(total))\n\n context = {\n 'cart_items': cart_items,\n 'total': total,\n 'original_total': original_total,\n 'savings': savings,\n 'product_count': product_count,\n 'delivery': delivery,\n 'free_delivery_delta': free_delivery_delta,\n 'free_delivery_threshold': settings.FREE_DELIVERY_THRESHOLD,\n 'grand_total': grand_total,\n 'points_available': points_available,\n 'discount_applied': discount_applied,\n 'points_earned': points_earned,\n }\n\n return context", "def test_initial_risk_position_sizer_without_cap(self):\n fraction_at_risk = 0.23\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk)\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n target_quantity = int(np.floor(portfolio_value * self.initial_risk / fraction_at_risk))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.contract, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.contract, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_update_cart(self, driver):\n\n logging.info(\"Start test case: Edit product in orderSummary\")\n data = self.test_data[\"Edit product in orderSummary\"]\n products = data[\"Products\"]\n logging.info(\"Test data: {}\".format(products))\n\n for i in range(len(products)):\n select_product(driver, products[i][\"Page\"], products[i][\"Product Name\"])\n add_product_to_cart(driver, products[i][\"Size\"], products[i][\"Color\"], products[i][\"Quantity\"])\n\n added_name = get_product_name(driver, index=data[\"Added Index\"] - 1)\n update_quantity_in_cart(driver, name=added_name, added_amount=data[\"Added Amount\"])\n expected_qty = get_product_detail_in_cart(driver, added_name)[\"Qty\"]\n\n 
removed_name = get_product_name(driver, index=data[\"Removed Index\"] - 1)\n remove_product_from_cart(driver, name=removed_name)\n expected_amt = get_product_amount_in_cart(driver)\n\n checkout_from_order_summary(driver)\n actual_amt = get_product_amount_in_order(driver)\n actual_qty = get_product_detail_in_order(driver, added_name)[\"Qty\"]\n logging.info(\"Verify product amount and product quantity on checkout page\")\n assert actual_amt == expected_amt, f\"your cart product amount is {actual_amt}, it should be {expected_amt}\"\n assert actual_qty == expected_qty, f\"The quantity of added product {added_name} is {actual_qty}, it should be {expected_qty}\"\n assert not verify_product_in_order(driver, removed_name)", "def test_remove_item_from_cart(self):\n items = self.get_list_of_items()\n index = randint(1, len(items) - 1)\n list_item = self.get_item_dict(items[index])\n\n food_cost = self.browser.find_element_by_id('food-cost')\n old_food_cost = int(food_cost.text)\n cost_loss = self.expected_contents[index]['cost']\n list_item['remove'].click()\n\n new_food_cost = int(food_cost.text)\n self.assertEqual(new_food_cost, old_food_cost - cost_loss)\n self.assert_element_stale(items[index])", "def climb(self):\n print(\"Inside WoodElf.climb\")", "def set_equivalent_cart(self, base_cart):\n\t\t# Emptying cart\n\t\tself.empty()\n\n\t\t# Getting base cart content\n\t\tcontents = base_cart.cart_content_set.all()\n\t\tbase_osm = base_cart.osm\n\t\tequivalence_store = {}\n\n\t\tfor content in contents:\n\t\t\tbase_product = content.product\n\t\t\tquantity = content.quantity\n\n\t\t\t# First, looking for a match\n\t\t\tmatch = base_product.productmatch_set.all()\n\t\t\tif len(match)>0:\n\t\t\t\tmatch = match[0]\n\t\t\t\tmathed_product = getattr(match, self.cart.osm+'_product') # Evil hack!! Or is it? 
I love Python :D\n\t\t\t\tif mathed_product is not None:\n\t\t\t\t\tmatch_content = self.add_product(mathed_product, quantity, is_user_added = False, is_match = True, is_suggested = False)\n\t\t\t\t\tsetattr(match_content, content.cart.osm+'_content', content)\n\t\t\t\t\tsetattr(content, match_content.cart.osm+'_content', match_content)\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': match_content,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': True,\n\t\t\t\t\t\t'is_suggested': False\n\t\t\t\t\t}\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontent.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmatch_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\n\t\t\t\t\t# print '\\tMatch : '+mathed_product.url\n\t\t\t\telse:\n\t\t\t\t\t# Look for similarities\n\t\t\t\t\tsimilarities = self.get_similarites(base_product, base_osm)\n\t\t\t\t\tif(len(similarities)>0):\n\t\t\t\t\t\tsim_content = self.add_product(similarities[0][0], quantity, is_user_added = False, is_match = False, is_suggested = True)\n\t\t\t\t\t\tsetattr(sim_content, content.cart.osm+'_content', content)\n\t\t\t\t\t\tsetattr(content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tcontent.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tconnection._rollback()\n\t\t\t\t\telse:\n\t\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t\t}\n\n\n\t\t\telse:\n\t\t\t\t# Look for similarities\n\t\t\t\tsimilarities = self.get_similarites(base_product, base_osm)\n\t\t\t\tif(len(similarities)>0):\n\t\t\t\t\tsim_content = self.add_product(similarities[0][0], quantity, is_user_added = False, is_match = False, is_suggested = True)\n\t\t\t\t\tsetattr(sim_content, content.cart.osm+'_content', content)\n\t\t\t\t\tsetattr(content, sim_content.cart.osm+'_content', sim_content)\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': sim_content,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcontent.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsim_content.save()\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tconnection._rollback()\n\t\t\t\telse:\n\t\t\t\t\tequivalence_store[content] = {\n\t\t\t\t\t\t'content': None,\n\t\t\t\t\t\t'is_user_added': False,\n\t\t\t\t\t\t'is_match': False,\n\t\t\t\t\t\t'is_suggested': True\n\t\t\t\t\t}\n\n\n\t\treturn equivalence_store", "def pol_to_cart():\n pass", "def bogof_discount(self):\n bogof_discount = 0\n for item in self.cart.items:\n if item.quantity > 1:\n bogof_discount += (math.floor(item.quantity / 2) * item.product.price)\n\n self.cart._total -= bogof_discount", "def test_remove_all(self): #SAUCE-LAB-8\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if 
inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were added')\n for item in first_item:\n item.remove_from_cart()\n if inventory_page.header.get_total_cart_items() == 0:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were removed')", "def test_basicSalePC(self):\n # Basic price check\n self.log.info(\"Price checking Generic Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Generic Item\")\n \n # Confirm the right item, at the right price\n self.read_price_check(\"Generic Item\", \"$0.01\")\n # Add the item\n pos.click(\"Sell Item\")\n \n # Confirm we added the item\n ret = self.confirm_line(-1, \"Generic Item\", \"$0.01\")\n if ret == True:\n self.log.info(\"Confirmed item added\")\n else:\n self.tc_fail(ret)\n \n # Setup for next test\n self.recover()", "def test_inTransSalePC(self):\n # Start a transasction\n pos.click_speed_key(\"Generic Item\")\n \n # Void the item to an empty transaction\n # NOTE: Should uncomment this when related defect is fixed (likely in MERLIN-1335)\n #pos.click(\"Void item\")\n \n # Repeat earlier test\n self.test_basicSalePC()", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def hay_stock(self):\n return self.producto.stock_disponible >= 0", "def ingredient_used_canceled(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used canceled initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.used.id)]\n , order=[('batch_number', 'DESC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n # pdb.set_trace()\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_update_shoppingcart_view(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # 
then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe the update\n self.url = reverse(\"update-shoppingcart\")\n data = { **self.shoppingcart_data }\n data[\"is_closed\"] = True\n data[\"id\"] = id_cart\n response = self.client.post(self.url, data, **self.auth_headers)\n if response.status_code == status.HTTP_200_OK:\n r_json = response.json()\n self.assertTrue(r_json[\"cart\"][\"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_items_in_cart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertTrue(context['display_shopping_cart'])", "def test_shopping_cart_displays_total_cost(self):\n expected_cart_cost = 0\n for item in self.fill_session_cart():\n expected_cart_cost += item['price'] * item['amount']\n\n self.client.get(self.SHOP_CART_URL)\n self.assertEqual(self.client.session['cart_cost'], expected_cart_cost)", "def cehs():\n\tcloseEHShutter()", "def test_can_check_if_cart_has_item_needing_delivery(self):\n request = HttpRequest()\n engine = import_module(settings.SESSION_ENGINE)\n session_key = None\n request.session = engine.SessionStore(session_key)\n\n # below two lines of code from https://stackoverflow.com/questions/11938164/why-dont-my-django-unittests-know-that-messagemiddleware-is-installed\n # fixes bug where test fails because unittest\n messages = FallbackStorage(request)\n setattr(request, '_messages', messages)\n\n random_non_delivery_item_1 = choice(StoreItem.objects.filter(delivery_required=False))\n random_non_delivery_item_2 = choice(\n StoreItem.objects.filter(delivery_required=False).exclude(id=random_non_delivery_item_1.id))\n random_delivery_item = choice(StoreItem.objects.filter(delivery_required=True))\n\n cart_add(request, random_non_delivery_item_1.id)\n self.assertFalse(cart_contains_item_needing_delivery(request))\n cart_add(request, random_non_delivery_item_2.id)\n self.assertFalse(cart_contains_item_needing_delivery(request))\n cart_add(request, random_delivery_item.id)\n self.assertTrue(cart_contains_item_needing_delivery(request))", "def test_find_stock_items(self):\n pass", "def test_delete_from_cart(open_browser, mysql_executor):\n HelperUrl.user_base_url(open_browser)\n MainPage(open_browser).open_product_page()\n ProductPage(open_browser).add_to_cart()\n CheckExistenceDB(mysql_executor).check_exist()\n Header(open_browser).open_cart_block() \\\n .delete_from_cart_block()\n CheckExistenceDB(mysql_executor).check_is_not_exist()", "def test_checkout_page_ie(self):\n # login as our customer\n logged_in = self.client.login(username=self.username, password=self.password)\n self.assertEqual(logged_in, True)\n\n cart_response = self.client.get(reverse('lfs_cart'))\n self.assertContains(cart_response, self.PRODUCT1_NAME, status_code=200)\n\n checkout_response = self.client.get(reverse('lfs_checkout'))\n\n # we expect a list of irish counties in the response as we have an Irish shipping address\n self.assertContains(checkout_response, 'Offaly', status_code=200)\n\n # we expect a list of american states in the response as we have an Irish shipping 
address\n self.assertContains(checkout_response, 'Washington', status_code=200)", "def test_checkout_page_ie(self):\n # login as our customer\n logged_in = self.client.login(username=self.username, password=self.password)\n self.assertEqual(logged_in, True)\n\n cart_response = self.client.get(reverse('lfs_cart'))\n self.assertContains(cart_response, self.PRODUCT1_NAME, status_code=200)\n\n checkout_response = self.client.get(reverse('lfs_checkout'))\n\n # we expect a list of irish counties in the response as we have an Irish shipping address\n self.assertContains(checkout_response, 'Offaly', status_code=200)\n\n # we expect a list of american states in the response as we have an Irish shipping address\n self.assertContains(checkout_response, 'Washington', status_code=200)", "def compare_price(self):\n if self.__product_price < self.__alert_price:\n #print(\"price drop...\")\n self.__alert_client = True\n self.__price_difference = self.__product_price - self.__alert_price\n else:\n #print(\"Price not reduced...\")\n self.__alert_client = False\n self.__price_difference = self.__product_price - self.__alert_price", "def test_add_to_cart(self):\n\n # Log the user in that is not the seller\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Confirm that product title appears in cart\n response = self.client.get(reverse('website:cart'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Ensure that the cart displays product title, but not the title for product2\n self.assertIn('<h6 class=\"mr-auto p-2\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mr-auto p-2\">Test Product2</h6>'.encode(), response.content)\n\n # Confirm that the post returns a response of 302\n response = self.client.get(reverse(\"website:add_to_cart\", args=(1,)))\n self.assertEqual(response.status_code, 302)", "def test_view_cart_contents(self):\n User.objects.create_user(\n username=\"testuser\", password=\"thisisasecret101\")\n item = Product(name=\"Product\",\n product_image=\"testing_img.jpg\",\n description=\"Product description.\",\n price=\"20.00\",\n stock_available=\"5\",\n showcase_product=\"True\")\n item.save()\n self.client.login(username=\"testuser\", password=\"thisisasecret101\")\n session = self.client.session\n session[\"cart\"] = {1: 1}\n session.save()\n response = self.client.get(\"/cart/\")\n self.assertEqual(response.status_code, 200)", "def checkout():\n global total_price\n total_price = 0\n update_total_price()\n tk_items.delete(0, tk.END)\n subprocess.run(['aplay', 'img/regi2.wav'])", "def main():\n\n dict_logs = {\n 'logs/e_log.log': logging.ERROR,\n 'logs/c_log.log': logging.INFO\n }\n\n logger = create_logger(**dict_logs)\n\n # For using Chrome\n browser = webdriver.Chrome('chromedriver.exe')\n\n url = 'https://www.bestbuy.com/site/nvidia-geforce-rtx-3090-24gb-gddr6x-pci-express-4-0-' \\\n 'graphics-card-titanium-and-black/6429434.p?skuId=6429434'\n\n # BestBuy RTX 3090 page\n browser.get(url)\n\n purchased = False\n increment = 0\n\n logger.info(f'Starting Web Scraping')\n # Try to add to cart\n while not purchased:\n\n try:\n # If success, product is out of stock, don't need the return\n browser.find_element_by_class_name(\"btn-disabled\")\n increment += 1\n if increment % (60*12) == 0: # Update STDOUT every 12 hours\n logger.info(f'Product not available')\n time.sleep(60)\n browser.refresh()\n\n except NoSuchElementException as e:\n logger.error(e)\n # Product in stock\n add_to_cart_button = 
browser.find_element_by_class_name('btn-primary')\n # Click the button and end the script\n add_to_cart_button.click()\n send_notification(config.get('TWILIO', 'twilio_to_number'))\n purchased = True\n\n logger.info('Product is in the shopping cart')\n\n # Hold the window open until manual purchase can be made\n while True:\n pass", "def test_shoppingcart_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performe update\n data = self.shoppingcart_data\n data[\"quantity\"] = 20\n data[\"discount_value\"] = 9.99\n data[\"is_closed\"] = True\n self._update_model(\"shoppingcart\", id, data, [\"quantity\", \"discount_value\", \"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def test_add_product_to_cart(self, driver):\n logging.info(\"Start test case: Continue Shop\")\n data = self.test_data[\"Continue Shop\"][\"Products\"][0]\n logging.info(f\"Test data: [{data}]\")\n product_name = data[\"Product Name\"]\n\n select_product(driver, data[\"Page\"], product_name)\n add_product_to_cart(driver, data[\"Size\"], data[\"Color\"], data[\"Quantity\"])\n assert is_product_in_cart(driver, product_name)\n continue_shopping_from_order_summary(driver)\n assert verify_current_page_is_home(driver)", "def test_search_shoes_item_to_buy(self):\n self.driver.find_element_by_id(\"search_query_top\").send_keys(\"shoes\")\n self.driver.find_element_by_name(\"submit_search\").click()\n self.driver.find_element_by_xpath(\n \"/html/body/div[1]/div[2]/div/div[3]/div[2]/ul/li[2]/div/div[1]/div/a[1]/img\").click()\n self.driver.find_element_by_name(\"Submit\").click()\n time.sleep(5)", "def test_CalculateStockItemOrders(self):\n symbol = \"XXXX\"\n\n # Create ActiveStockItem\n activeStockItem = ActiveStockItem(symbol=symbol)\n quantity = 2\n buyStepSize = 1\n activeStockItem.SellStepSize = 2\n activeStockItem.SellStepType = SellDeltaType.FIXED\n activeStockItem.StartPrice = 20.55\n activeStockItem.QuantityMultiplier = 1\n activeStockItem.MaxActiveBuy = 2\n priceCoordinates:List[PriceCoordinate] = []\n priceCoordinates.append(PriceCoordinate(startPrice=0,quantity=quantity, \n buyDeltaType=BuyDeltaType.FIXED, fixedBuyDelta=buyStepSize))\n activeStockItem.PriceCoordinates = priceCoordinates\n\n # Create PortfolioPosition\n portfolioPosition = PortfolioPosition(symbol=symbol)\n portfolioPosition.Quantity = 9\n \n expectedLimitOrders:List[OrderInfo] = [\n OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),\n 
OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),\n OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),\n OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)\n ]\n\n possibleLimitOrders:List[OrderInfo] = self.manageOrdersHelpers.GeneratePossibleLimitOrders(activeStockItem, portfolioPosition.Quantity)\n\n self.assertSequenceEqual(expectedLimitOrders, possibleLimitOrders)\n\n placeOrders, cancelOrders = self.moneyMaker.CalculateStockItemOrders(activeStockItem, [], portfolioPosition)\n\n print(placeOrders)\n\n print(cancelOrders)\n\n for activeStockItem in ActiveStockItems:\n print(activeStockItem.Symbol)", "def test_add_to_cart_item_at_minimum_stock(self):\n response = self.client.get(\n '/self.base_url/sales/2/1',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"The following product has reached the mimimum stock, please contact the admin for sales below minimum stock\")\n self.assertEqual(response.status_code,200)", "def test_putaway_after_manufacturing_3(self):\n self.laptop.tracking = 'serial'\n mo_laptop = self.new_mo_laptop()\n serial = self.env['stock.production.lot'].create({'product_id': self.laptop.id, 'company_id': self.env.company.id})\n\n mo_form = Form(mo_laptop)\n mo_form.qty_producing = 1\n mo_form.lot_producing_id = serial\n mo_laptop = mo_form.save()\n mo_laptop.button_mark_done()\n\n # We check if the laptop go in the depot and not in the stock\n move = mo_laptop.move_finished_ids\n location_dest = move.move_line_ids.location_dest_id\n self.assertEqual(location_dest.id, self.depot_location.id)\n self.assertNotEqual(location_dest.id, self.stock_location.id)", "def test_manufacturing_scrap(self):\n\n # Update demo products\n (self.product_4 | self.product_2).write({\n 'tracking': 'lot',\n })\n\n # Update Bill Of Material to remove product with phantom bom.\n self.bom_3.bom_line_ids.filtered(lambda x: x.product_id == self.product_5).unlink()\n\n # Create Inventory Adjustment For Stick and Stone Tools with lot.\n lot_product_4 = self.env['stock.production.lot'].create({\n 'name': '0000000000001',\n 'product_id': self.product_4.id,\n 'company_id': self.env.company.id,\n })\n lot_product_2 = self.env['stock.production.lot'].create({\n 'name': '0000000000002',\n 'product_id': self.product_2.id,\n 'company_id': self.env.company.id,\n })\n\n stock_inv_product_4 = self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stick',\n 'product_ids': [(4, self.product_4.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_4.id, 'product_uom_id': self.product_4.uom_id.id, 'product_qty': 8, 'prod_lot_id': lot_product_4.id, 'location_id': self.stock_location_14.id}),\n ]})\n\n stock_inv_product_2 = self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stone Tools',\n 'product_ids': [(4, self.product_2.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_2.id, 'product_uom_id': self.product_2.uom_id.id, 'product_qty': 12, 'prod_lot_id': lot_product_2.id, 'location_id': self.stock_location_14.id})\n ]})\n (stock_inv_product_4 | stock_inv_product_2)._action_start()\n stock_inv_product_2.action_validate()\n stock_inv_product_4.action_validate()\n\n #Create Manufacturing order.\n production_form = Form(self.env['mrp.production'])\n production_form.product_id = self.product_6\n production_form.bom_id = self.bom_3\n production_form.product_qty = 12\n production_form.product_uom_id = 
self.product_6.uom_id\n production_3 = production_form.save()\n production_3.action_confirm()\n production_3.action_assign()\n\n # Check Manufacturing order's availability.\n self.assertEqual(production_3.reservation_state, 'assigned', \"Production order's availability should be Available.\")\n\n location_id = production_3.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) and production_3.location_src_id.id or production_3.location_dest_id.id,\n\n # Scrap Product Wood without lot to check assert raise ?.\n scrap_id = self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'production_id': production_3.id})\n with self.assertRaises(UserError):\n scrap_id.do_scrap()\n\n # Scrap Product Wood with lot.\n self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'lot_id': lot_product_2.id, 'production_id': production_3.id})\n\n #Check scrap move is created for this production order.\n #TODO: should check with scrap objects link in between", "def mock_candle_producers(state: SharedState):", "def test_cancel_shipment_old(self):\n pass", "def test_add_item_to_cart(client):\n raise NotImplemented('Acceptance test failed')", "def simulate(tracks, carts):\n collisions = set()\n spots = set((c[1], c[0]) for c in carts)\n new_carts = []\n while carts:\n cart = heappop(carts)\n y, x, = cart[0:2]\n if (x, y) in collisions:\n continue\n spots.remove((x, y))\n\n new_cart = move(cart, tracks)\n y, x = new_cart[0:2]\n if (x, y) in spots:\n collisions.add((x, y))\n spots.add((x, y))\n continue\n spots.add((x, y))\n heappush(new_carts, new_cart)\n\n carts = new_carts\n return carts, collisions", "def on_market_info(self):\n pass", "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def test_get_cart(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', True)\n self.assertEqual(self.cart_item_manager.get_cart(user_id, cart_id),\n self.dynamo_accessor.get_item(config.dynamo_cart_table_name,\n keys={'UserId': user_id, 'CartId': cart_id}))", "def show_cart(update, context):\n bot = context.bot\n query = update.callback_query\n\n chat_id = update.effective_chat.id\n user = update.effective_user\n\n # all items ordered by user in message and his price to pay for them\n message_and_price = str_user_cart(chat_id, user.id)\n # InlineKeyboard back to start menu\n keyboard = [[InlineKeyboardButton(\"back to menu\", callback_data=str(ONE))]]\n # change last message send by bot\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message_and_price['message'],\n reply_markup=InlineKeyboardMarkup(keyboard))\n # notify ConversationHandler of SEVENTH stage\n return SEVENTH", "def sell():\n return apology(\"TODO\")", "def test_add_to_cart(self):\n\n # test sale item that can be sold\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + 
self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"These are the items on your Cart\")\n self.assertEqual(response.status_code,200)", "def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n trade_data = {\n \"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")", "def test_update_cart_name_same_name(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.update_cart(user_id, cart_id, {'CartName': 'Cart1'})", "def EmptyCart(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_shoppingcart_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performing detail\n self._detail_model(\"shoppingcart\", self.shoppingcart_data, id, [\"quantity\", \"discount_value\", \"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def main():\n root = Tk()\n login_window(root, \"Alcohol Store\", \"250x150\")\n root.iconbitmap('picture/Alcohol for all-logos_transparent.ico')\n root.mainloop()\n # try:\n #\n # logger.info(\"StoreApp Ready\")\n #\n # store = Store()\n # #store.display_drinks_in_store()\n #\n #\n #\n #\n # #Under age example\n # #--------------------------------------------------------------------\n # drink = Drink(\"Wiski\", \"RedLabel\", 1055, 500, 0)\n # buyer = Buyer(\"Wihbe\", 2055, 5022, 30)\n # sale = Sale(558956)\n #\n # wihbe_product_sale = ProductSale(5, sale, drink, buyer)\n # store.sell_product(wihbe_product_sale)\n #\n # #Not exist in the stock example.\n # #--------------------------------------------------------------------\n #\n # buyer_amit = Buyer(\"Amit\", 20365899, 504808196, 80)\n # amit_product_sale = ProductSale(5, sale, drink, buyer_amit)\n # store.sell_product(amit_product_sale)\n #\n # #Purchase example.\n # #--------------------------------------------------------------------\n # supplier = Supplier(\"Chen\",\"Haifa\", 11, 559842658)\n # product_purchase = ProductPurchase(drink, 
supplier, \"Buying vodka\", 3)\n # store.product_purchase_from_supplier(product_purchase)\n #\n # #Selling not enough products.\n # #--------------------------------------------------------------------\n # store.sell_product(amit_product_sale)\n #\n # #Another purchase\n # #--------------------------------------------------------------------\n # store.product_purchase_from_supplier(product_purchase)\n #\n # #Selling with the right amount.\n # #--------------------------------------------------------------------\n # store.sell_product(amit_product_sale)\n #\n # #Display drink in store.\n # #--------------------------------------------------------------------\n # store.display_drinks_in_store()\n #\n # #Display Purchase and Selling history in store.\n # #--------------------------------------------------------------------\n # store.display_product_purchases_from_supplier()\n #\n #\n # logger.info(\"Finshed running\")\n #\n # except Exception as e:\n # exc_traceback = sys.exc_info()[2] # Full traceback address\n #\n # print(\n # \"\\n=======================================================================================================================\")\n # logger.error(\"Exception 'e': {0} -- Error started in line: {1}\".format(e, exc_traceback.tb_lineno))\n # print(\n # \"=======================================================================================================================\\n\")\n #\n # print(\n # \"=======================================================================================================================\")\n # full_traceback = traceback.format_tb(exc_traceback)\n # logger.error(\"main -> Exception: printing full traceback stack\")\n # # full_traceback.reverse()\n # for error_trace in full_traceback:\n # # trace_arrange = error_trace.split(',')\n # trace_arrange = re.split(', |\\n', error_trace)\n # logger.error(\"Method: {0} -- {1} -- Error: {2} -- {3}\".format(trace_arrange[2], trace_arrange[1],\n # trace_arrange[3].strip(), trace_arrange[0]))\n #\n # print(\n # \"=======================================================================================================================\\n\")\n # logger.info(r'Log file save in: {0}\\{1}.log'.format(get_working_dir(), get_project()))\n #\n # finally:\n # release_logger()", "def test_add_stock_item(self):\n pass", "def test_can_not_access_checkout_with_empty_cart(self):\n responses = [self.client.post(self.CHECKOUT_URL, follow=True),\n self.client.get(self.CHECKOUT_URL, follow=True)]\n\n for response in responses:\n self.assertRedirects(response, reverse('orders:shopping_cart'))\n message = list(response.context.get('messages'))[0]\n self.assertEqual(message.tags, 'error')\n self.assertTrue(\"Your cart is empty.\" in message.message)", "def handle_item_consumption(self):\n self.tooltip_focus = None\n self.active_item_index = None\n self.refresh_inventory()\n self.refresh_equipment()", "def cart(request):\n \"\"\"crt = Cart(request)\n for item in crt:\n if item['quantity'] > 1:\n item['quantity'] = 1\"\"\"\n return {'cart': Cart(request)}", "def hook_buy_this_card(self, game, player):\n totrash = [c for c in player.piles[Piles.PLAYED] if c.isTreasure()]\n for c in totrash:\n player.output(f\"Mint trashing {c.name}\")\n player.trash_card(c)", "def test_classical_as_conductor(self):\n self.add_mp3(artist='Artist 1', title='Title 1',\n group='Group 1', conductor='Conductor 1', composer='Composer 1',\n album='Album 1', filename='song1.mp3', path='album_1')\n self.add_mp3(artist='Artist 1', title='Title 2',\n 
group='Group 2', conductor='Conductor 2', composer='Composer 2',\n album='Album 1', filename='song2.mp3', path='album_1')\n self.run_add()\n\n self.assertEqual(Artist.objects.count(), 8)\n artist = Artist.objects.get(name='Conductor 2')\n\n self.assertEqual(Album.objects.count(), 1)\n album = Album.objects.get()\n\n self.assertEqual(Song.objects.count(), 2)\n song_1 = Song.objects.get(title='Title 1')\n song_2 = Song.objects.get(title='Title 2')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(album)])\n self.assertContains(response, '1 album')\n self.assertQuerysetEqual(response.context['songs'].data, [repr(song_2)])\n self.assertContains(response, '1 song')\n self.assertContains(response, str(album))\n self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n \n for artist in Artist.objects.exclude(name='Various'):\n self.assertContains(response, str(artist))\n self.assertContains(response, reverse('exordium:artist', args=(artist.normname,)))\n\n self.assertNotContains(response, str(song_1))\n self.assertNotContains(response, song_1.get_download_url_html5())\n self.assertNotContains(response, song_1.get_download_url_m3u())\n self.assertContains(response, str(song_2))\n self.assertContains(response, song_2.get_download_url_html5())\n self.assertContains(response, song_2.get_download_url_m3u())", "def get_similar_products(list):\n #initialize cart with random ASIN\n params = {\"Item.1.ASIN\":'B000DLB2FI', 'Item.1.Quantity':1}\n cart = amazon.CartCreate(**params)\n root = objectify.fromstring(cart)\n cartid = _safe_get_element_text('Cart.CartId', root)\n hmac = _safe_get_element_text('Cart.HMAC', root)\n\n #create empty list of similar products\n sblist = []\n \n count = 0 #testing\n\n #iterate through list of original ASINs and retrieve also bought products\n print 'Retrieving \\\"Also Bought\\\" Products!' #testing\n for item in list:\n #add to cart\n amazon.CartClear(CartId=cartid, HMAC=hmac)\n params = {\"Item.1.ASIN\":item, 'Item.1.Quantity':1, 'CartId':cartid, 'HMAC':hmac, 'ResponseGroup':'Cart,CartSimilarities'}\n cart = amazon.CartAdd(**params)\n root = objectify.fromstring(cart)\n \n count +=1 #testing\n print count #testing\n \n #iterate through each similar product and add to list\n if \"SimilarProduct\" in cart:# HOW TO ACCOUNT FOR NO SIMILAR PRODUCTS\n for item2 in root.Cart.SimilarProducts.SimilarProduct:\n if _safe_get_element_text('Title', item2) is not None:\n sblist.append({'Original ASIN' : item,\n 'Associated ASIN' : item2.ASIN,\n 'Title' : item2.Title,\n 'Price' : None,\n 'Currency Code' : None,\n 'Relationship' : \"Also Bought\"})\n\n print 'Total # of \\\"Also Bought\\\" Products: ' + str(len(sblist)) #for testing\n count = 0 #testing\n \n #iterate through each similar prodcut and obtain highest price\n print 'Retrieving prices!' 
#testing\n for item in sblist:\n if item['Title'] is not None:\n title = filter(lambda x: x in string.printable, item['Title'].text) #remove non-ascii\n item['Title'] = title\n \n count+=1 #testing\n print count #testing\n\n pricelist = amazon.ItemLookup(ItemId=item['Associated ASIN'],ResponseGroup=\"OfferSummary,VariationSummary\")\n priceroot = objectify.fromstring(pricelist)\n #conditionals to check if parent or child ASIN or OOS\n if _safe_get_element_text(\"Items.Item.VariationSummary.HighestPrice.FormattedPrice\", priceroot) is not None: #Parent ASIN\n item['Price'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.FormattedPrice', priceroot)\n item['Currency Code'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.CurrencyCode', priceroot)\n elif _safe_get_element_text(\"Items.Item.OfferSummary.LowestNewPrice.FormattedPrice\", priceroot) is not None: #Child ASIN\n #save price and currency in case no other sellers\n price = _safe_get_element_text(\"Items.Item.OfferSummary.LowestNewPrice.FormattedPrice\", priceroot)\n currencycode = _safe_get_element_text(\"Items.Item.OfferSummary.LowestNewPrice.CurrencyCode\", priceroot)\n amazon.CartClear(CartId=cartid, HMAC=hmac)\n params = {\"Item.1.ASIN\":item['Associated ASIN'], 'Item.1.Quantity':1, 'CartId':cartid, 'HMAC':hmac}\n cart = amazon.CartAdd(**params)\n rootcart = objectify.fromstring(cart)\n parentASIN = _safe_get_element_text(\"Cart.ParentASIN\",rootcart) #get Parent ASIN\n parentproduct = amazon.ItemLookup(ItemId=parentASIN, ResponseGroup=\"OfferSummary,VariationSummary\")\n rootparent = objectify.fromstring(parentproduct)\n #No way to obtain highest price without through VariationSummary\n if _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.FormattedPrice', rootparent) is not None:\n item['Price'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.FormattedPrice', rootparent)\n item['Currency Code'] = _safe_get_element_text('Items.Item.VariationSummary.HighestPrice.CurrencyCode', rootparent)\n else:\n item['Price'] = price\n item['Currency Code'] = currencycode\n\n return sblist", "def test_cook_set_free(cook_busy, product_for_cook):\n cook_busy.set_free(True)\n # if product needs to be cooked\n assert product_for_cook.get_need_cook_status() is True\n cook_busy.cook_dish(product_for_cook)\n assert product_for_cook.get_need_cook_status() is False", "def test_classical_as_conductor_various(self):\n self.add_mp3(artist='Artist 1', title='Title 1',\n group='Group 1', conductor='Conductor 1', composer='Composer 1',\n album='Album 1', filename='song1.mp3', path='album_1')\n self.add_mp3(artist='Artist 2', title='Title 2',\n group='Group 2', conductor='Conductor 2', composer='Composer 2',\n album='Album 1', filename='song2.mp3', path='album_1')\n self.run_add()\n\n self.assertEqual(Artist.objects.count(), 9)\n artist = Artist.objects.get(name='Conductor 2')\n\n self.assertEqual(Album.objects.count(), 1)\n album = Album.objects.get()\n\n self.assertEqual(Song.objects.count(), 2)\n song_1 = Song.objects.get(title='Title 1')\n song_2 = Song.objects.get(title='Title 2')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(album)])\n self.assertContains(response, '1 album')\n self.assertQuerysetEqual(response.context['songs'].data, [repr(song_2)])\n self.assertContains(response, '1 song')\n self.assertContains(response, str(album))\n 
self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n \n for artist in Artist.objects.exclude(name='Artist 1'):\n self.assertContains(response, str(artist))\n self.assertContains(response, reverse('exordium:artist', args=(artist.normname,)))\n for artist in Artist.objects.filter(name='Artist 1'):\n self.assertNotContains(response, str(artist))\n self.assertNotContains(response, reverse('exordium:artist', args=(artist.normname,)))\n\n self.assertNotContains(response, str(song_1))\n self.assertNotContains(response, song_1.get_download_url_html5())\n self.assertNotContains(response, song_1.get_download_url_m3u())\n self.assertContains(response, str(song_2))\n self.assertContains(response, song_2.get_download_url_html5())\n self.assertContains(response, song_2.get_download_url_m3u())", "def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? 
raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE", "def test_add_all(self): #SAUCE-LAB-7\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('\\n')\n print('Not all items were added')", "def ingredient_used(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. Enjoy!\")", "def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. 
Enjoy!\")", "def test_cancel_shipment(self):\n pass", "def __init__(self):\n self._state: CartState = None", "def test_shopping_cart_is_empty(self):\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your shopping cart is empty.\")\n self.assertQuerysetEqual(response.context['contents'], [])", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "async def update(self, *args, **kwargs):\n if not self.__bought:\n random_stock = 1\n stock_price = self.priceindicator[random_stock].price\n if stock_price != 0:\n random_const = float(decimal.Decimal(random.randrange(-5,5))/100)\n stock_price = stock_price + stock_price*random_const\n stock_price = int(stock_price)\n await self.place_buy_order(random_stock, self.settings[\"stocks_per_company\"], stock_price, 1)\n log_message = \"StockBuyerBot(\" + self.name + \") bought \" + str(random_stock)\n print(log_message)\n else:\n log_message = \"StockBuyerBot(\" + self.name + \") bought nothing\"\n print(log_message)\n self.add_to_log(self.id, log_message)", "def is_out_of_stock(self) -> bool:\n return self.on_hand == 0", "def update(self, context):\n session = Session()\n\n for sub in self.cart:\n offers = session.search(sub.query, self.lat, self.lon, self.radius)\n for offer in sub.handle_offers(offers):\n context.bot.send_message(self.chat_id, text=offer_text(offer))\n\n updates = sub.check_offers()\n for offer in updates['expired']:\n context.bot.send_message(self.chat_id,\n text=offer_text_expired(offer))\n for offer in updates['expiring']:\n context.bot_send_message(self.chat_id,\n text=offer_text_expiring(offer))\n self.config_updated()", "def test_free_product(self):\n product = self.create_product(price=D('0.00'))\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n self.assertEqual(cs_data['amount'], '0.00')\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)", "def next(self):\r\n if self.position.size == 0:\r\n # The condition for activating BUY function --> By checking oversold condition.\r\n if self.rsi_2 < 30 and self.rsi_3 < 40:\r\n self.buyAlert = True\r\n # If BUY is activated and below conditions are met, then aa buy order would be placed.\r\n if self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and self.buyAlert:\r\n size = round((self.broker.getcash() / self.data), 3)\r\n self.order = self.buy(size=size)\r\n self.buyAlert = False\r\n print(round(self.broker.get_cash(), 1))\r\n # print(self.datas[0].low[0])\r\n\r\n if self.position.size != 0:\r\n # The condition for activating SELL_1 function --> Waiting for RSI to reach overbought zone.\r\n if self.rsi_4 > 67:\r\n self.sellAlert1 = True\r\n # If SELL_1 is activated and below conditions are met, then a sell order would be placed.\r\n if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:\r\n self.close()\r\n self.successNum += 1\r\n self.sellAlert1 = False\r\n\r\n # The condition for activating SELL_2 function --> Activated at overbought condition with RSI>85\r\n if self.rsi_4 > 85:\r\n self.sellAlert2 = True\r\n # If SELL_2 is activated and below conditions are met, then a sell order would be placed.\r\n if 
(self.rsi_4 < 80) and self.sellAlert2:\r\n self.close()\r\n self.successNum += 1\r\n self.sellAlert1 = False\r\n self.sellAlert2 = False\r\n\r\n # Setting Stop Loss for wrongly opened position.\r\n if 0.82 * self.order.executed.price > self.datas[0].close > 0.8 * self.order.executed.price:\r\n self.close()\r\n self.failureNum += 1\r\n print('Shit !!! Failed for {} times.'.format(self.failureNum))", "def test_6(self):\n toothpaste = Store.Product(11, \"toothpaste\", \"dental\", 2, 4)\n milk = Store.Product(12, \"milk\", \"dairy\", 2, 3)\n eggs = Store.Product(14, \"eggs\", \"dairy\", 2, 2)\n apple_juice = Store.Product(13, \"apple juice\", \"drink\", 1, 1)\n\n s = Store.Store()\n s.add_product(toothpaste)\n s.add_product(milk)\n s.add_product(eggs)\n s.add_product(apple_juice)\n\n henry = Store.Customer(\"henry\", \"mrh\", False)\n s.add_member(henry)\n\n s.add_product_to_member_cart(11, \"mrh\")\n s.add_product_to_member_cart(12, \"mrh\")\n s.add_product_to_member_cart(14, \"mrh\")\n self.assertAlmostEqual(s.check_out_member(\"mrh\"), 6.42, \"not the correct checkout amount\")", "def GetCart(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def is_buy(self):\n return(copysign(1, self.volume)>0)", "def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def calculate_signals(self, event):\n if event.type == 'MARKET':\n for s in self.symbol_list:\n highs = self.bars.get_latest_bars_values(\n s, \"high\", N=self.long_window\n )\n lows = self.bars.get_latest_bars_values(\n s, \"low\", N=self.long_window\n )\n close = self.bars.get_latest_bar_value(s, 'close')\n bar_date = self.bars.get_latest_bar_datetime(s)\n bar_date = datetime.strptime(bar_date[:-4], \"%Y-%m-%dT%H:%M:%S.%f\")\n if highs is not None and len(highs) == self.long_window and \\\n lows is not None and len(lows) == self.long_window:\n\n # close all orders before the end of weekend, Friday 17:00 in this case\n # uncomment this chunk of code if not\n # if bar_date.weekday() == 4 and bar_date.hour is 17:\n # action = ActionEvent(s, 'CLOSE_ALL')\n # self.events.put(action)\n # return\n R_max = np.max(highs[-self.short_window:])\n R_min = np.min(lows[-self.short_window:])\n R = (R_max - R_min) * 10000\n R = round(R, 1)\n\n R2_max = np.max(highs[-self.long_window:])\n R2_min = np.min(lows[-self.long_window:])\n R2 = (R2_max - R2_min) * 10000\n R2 = round(R2, 1)\n\n real_date = bar_date+timedelta(hours=4)\n # print('<----- K 线时间 {} -----> (当前实际时间是 {} 的第一秒)'.format(bar_date, real_date))\n # print('过去 {} 个小时, 最高价是 {}, 最低价是 {}. 波动值 R2 是 {} 个 Pips.'.format( 4*self.long_window, R2_max, R2_min, R2))\n if R2 < self.c1 or R2 > self.c2:\n # print('当前 R2 波动值不满足限制条件: {} < R2 < {}'.format(self.c1, self.c2))\n # print('不交易,略过。\\n\\n')\n return\n\n # print('当前 R2 波动值满足限制条件: {} < R2 < {} \\n'.format(self.c1, self.c2))\n # print('过去 {} 个小时, 最高价是 {}, 最低价是 {}. 波动值 R 是 {} 个 Pips.'.format( 4*self.short_window, R_max, R_min, R))\n\n buy_under = round(self.k1 * R, 1)\n limit_price = round(close - buy_under/10000, 5)\n # print('当前价格是 {}. 
{} 倍的 R 是 {} 个 pips '.format(close,self.k1, buy_under))\n # print('开一个限价的买单 (Limit Buy Order) 在当前价格 {} 的 {} 个 pips 之下,即 {}.'.format(close, buy_under, limit_price))\n\n profit_target = round(self.k2 * R, 1)\n # print('目标盈利 ( profit_target ) 是 {} 倍的 R,即 {} 个 pips.'.format(self.k2, profit_target))\n profit_target = round(limit_price + profit_target / 10000, 5)\n # print('即, {}'.format(profit_target))\n # print('止损 (stop_loss) 为固定的 {} 个 pips.'.format(self.sl))\n stop_loss = round(limit_price - self.sl / 10000, 5)\n # print('即, {}'.format(stop_loss))\n signal_type = 'LONG'\n signal = SignalEvent(s, real_date, signal_type, 'LMT',\n limit_price, stop_loss, profit_target)\n self.events.put(signal)" ]
[ "0.6157707", "0.579232", "0.5664724", "0.5641533", "0.5639043", "0.55788285", "0.5577999", "0.5516711", "0.5514344", "0.54846823", "0.5480096", "0.5448836", "0.54019326", "0.53919315", "0.5380245", "0.53151876", "0.5282395", "0.52394223", "0.52324045", "0.5215534", "0.52070886", "0.5180305", "0.516736", "0.51620936", "0.51585335", "0.51403695", "0.512709", "0.5126571", "0.5120218", "0.5115067", "0.5086404", "0.5074096", "0.5062015", "0.50370944", "0.5036341", "0.50323546", "0.5028114", "0.50238645", "0.5019375", "0.500778", "0.49979112", "0.49978358", "0.49941865", "0.49862266", "0.49862266", "0.4982912", "0.49800628", "0.49749303", "0.4974235", "0.49677", "0.49536008", "0.49401614", "0.49251646", "0.49235615", "0.4916814", "0.4906797", "0.4898286", "0.4889855", "0.48898083", "0.48863405", "0.4884756", "0.48803115", "0.48801297", "0.48697805", "0.48683125", "0.48613495", "0.48577785", "0.48475483", "0.48415577", "0.4841485", "0.48403698", "0.48345807", "0.4831161", "0.48288396", "0.48197335", "0.48128286", "0.4800734", "0.47973698", "0.47872072", "0.47805914", "0.47702536", "0.4769619", "0.47627258", "0.47613704", "0.4760477", "0.47570184", "0.4755311", "0.47531188", "0.47519982", "0.47502783", "0.47486222", "0.47353548", "0.47346994", "0.47345325", "0.47342646", "0.47269428", "0.47253767", "0.4724077", "0.47235307", "0.47225124", "0.47222894" ]
0.0
-1
Unwraps the private key into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey object
def unwrap(self): if self.algorithm == 'rsa': return self.asn1['private_key'].parsed if self.algorithm == 'dsa': params = self.asn1['private_key_algorithm']['parameters'] return DSAPrivateKey({ 'version': 0, 'p': params['p'], 'q': params['q'], 'g': params['g'], 'public_key': self.public_key.unwrap(), 'private_key': self.asn1['private_key'].parsed, }) if self.algorithm == 'ec': output = self.asn1['private_key'].parsed output['parameters'] = self.asn1['private_key_algorithm']['parameters'] output['public_key'] = self.public_key.unwrap() return output
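Note: the snippet below is not part of the dataset; it is a minimal, hedged sketch showing how the 'rsa' branch of the unwrap() document above can be reproduced standalone with asn1crypto. The file path and variable names are illustrative, and it assumes a PKCS#8 ("BEGIN PRIVATE KEY") PEM input; the wrapper class that owns unwrap() (self.asn1, self.algorithm, self.public_key) is assumed and not shown.

# Sketch: load a PKCS#8 private key and extract the algorithm-specific key,
# mirroring the 'rsa' branch of unwrap() above.
from asn1crypto import keys, pem

with open('test.key', 'rb') as f:          # hypothetical PEM-encoded PKCS#8 key
    raw = f.read()

# Strip the PEM armor if present; otherwise treat the input as DER.
if pem.detect(raw):
    _, _, der_bytes = pem.unarmor(raw)
else:
    der_bytes = raw

key_info = keys.PrivateKeyInfo.load(der_bytes)

if key_info.algorithm == 'rsa':
    # Same as the 'rsa' branch of unwrap(): the parsed value of the
    # 'private_key' field is an asn1crypto.keys.RSAPrivateKey.
    rsa_key = key_info['private_key'].parsed
    print(rsa_key['modulus'].native.bit_length())   # e.g. 2048

The DSA and EC branches need more than the parsed 'private_key' field: they also pull the domain parameters from private_key_algorithm['parameters'] and the public key, which is why unwrap() reaches into self.asn1 and self.public_key to assemble a DSAPrivateKey or complete the ECPrivateKey.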
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].parsed\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': Integer(pow(\n params['g'].native,\n parsed.native,\n params['p'].native\n )),\n 'private_key': parsed,\n })\n\n if key_alg == 'ec':\n parsed = key_info['private_key'].parsed\n parsed['parameters'] = key_info['private_key_algorithm']['parameters']\n return parsed\n\n raise ValueError('Unsupported key_info.algorithm \"%s\"' % key_info.algorithm)", "def _wrap_privatekey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PrivateKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize private key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_private_key(der, password=None,\n backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def load_private_key(self, private_key):\n if not self.curve:\n self.curve = private_key.curve\n if self.curve != private_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.private_key = private_key\n return self.private_key.get_verifying_key()", "def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def mk_keyobj_from_private_key(self, privkey):\n bn = BACKEND_KP.private_key_obj._backend._ffi.NULL\n bn_ptr = BACKEND_KP.private_key_obj._backend._lib.BN_bin2bn(privkey, len(privkey), bn)\n secret_val = BACKEND_KP.private_key_obj._backend._bn_to_int(bn_ptr)\n\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256K1(), default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def _createPrivateKey(key):\r\n if not isinstance(key, RSAKey):\r\n raise AssertionError()\r\n if not key.hasPrivateKey():\r\n raise AssertionError()\r\n return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,\r\n key.dQ, key.qInv)", "def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data, password)\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a public key\n '''\n ))\n\n if key_type == 'certificate':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n 
rather a certificate\n '''\n ))\n\n try:\n pki = PrivateKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PrivateKeyInfo\n\n try:\n parsed_wrapper = EncryptedPrivateKeyInfo.load(data)\n encryption_algorithm_info = parsed_wrapper['encryption_algorithm']\n encrypted_data = parsed_wrapper['encrypted_data'].native\n decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)\n pki = PrivateKeyInfo.load(decrypted_data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not EncryptedPrivateKeyInfo\n\n try:\n parsed = RSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPrivateKey\n\n try:\n parsed = DSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'dsa')\n except (ValueError):\n pass # Data was not a DSAPrivateKey\n\n try:\n parsed = ECPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'ec')\n except (ValueError):\n pass # Data was not an ECPrivateKey\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known private key format\n '''\n ))", "def _get_private_key(self, privkey=None):\n\n # read private keys from keyring\n privkeys = self.gpg.list_keys(True) # True => private keys\n if len(privkeys) > 0 and privkeys[-1].has_key('fingerprint'):\n fingerprints = []\n for k in privkeys:\n fingerprints.append(k['fingerprint'])\n else:\n # no private key in keyring\n return None\n\n if privkey:\n # check for existence of private key received as argument\n # DEVEL: check for expiration as well\n if len(privkey) > 7 and len(privkey) <= 40:\n for fp in fingerprints:\n if fp.endswith(privkey):\n # work with last 16 significant chars internally,\n # even if only 8 are required in trac.ini\n privkey = fp[-16:]\n break\n # no fingerprint matching key ID\n else:\n privkey = None\n else:\n # reset invalid key ID\n privkey = None\n else:\n # select (last) private key from keyring\n privkey = fingerprints[-1][-16:]\n\n return privkey", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def _serialize_private_key(private_key, password=None):\n error = None\n pvt_key_loaders = [\n load_pem_private_key, load_der_private_key\n ]\n pvt_key = None\n for loader in pvt_key_loaders:\n if not pvt_key:\n try:\n pvt_key = loader(\n private_key.encode('utf-8'),\n password=password,\n backend=default_backend()\n )\n error = False\n break\n except (ValueError, UnsupportedAlgorithm) as err:\n error = err\n if error:\n raise errors.InvalidPrivateKeyError(error)\n else:\n return pvt_key", "def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def test_private_key_rsa(self):\n priv = \"\"\"-----BEGIN PRIVATE 
KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_RSA)", "def private_key(self):\n if self._private_key is not None:\n return self._private_key[0]\n\n spk = self.serialized_private_key\n passphrase = self.passphrase\n\n try:\n self._private_key = [\n serialization.load_pem_private_key(\n self.serialized_private_key,\n backend=default_backend(),\n password=self.passphrase)]\n\n return self._private_key[0]\n\n except:\n raise\n self._private_key = [None]\n return self._private_key[0]", "def test_private_key_pkey(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n key = crypto.load_privatekey(PEM, priv)\n self.assertEqual(utils.private_key_type(key), c.KEY_RSA)", "def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def validate_privatekey_pem(key_pem):\n assert isinstance(key_pem, str)\n\n private_key_cryptography = serialization.load_pem_private_key(\n data=key_pem.encode('ascii'),\n password=None,\n backend=cryptography_default_backend\n )\n\n if not isinstance(private_key_cryptography, rsa.RSAPrivateKey):\n sys.exit('Unexpected private key type')\n\n return private_key_cryptography", "def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid private key encoding\")\n\n return decoded_key[\"privateKey\"]", "def rsa_decrypt(self, thing):\n return self.true_private_key.decrypt(\n thing,\n cryptography.hazmat.primitives.asymmetric.padding.OAEP(\n mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(\n algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def load_private_key_bytes(self, private_key):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key load.\")\n return self.load_private_key(\n SigningKey.from_string(private_key, curve=self.curve))", "def private_key(self):\n return PrivateKey(self._sk.private_bytes(\n encoding=serialization.Encoding.Raw,\n format=serialization.PrivateFormat.Raw,\n encryption_algorithm=serialization.NoEncryption()))", "def private_key(\n self,\n key: str,\n default: Any = undefined,\n description: str = None,\n key_format: Optional[EncryptionKeyFormat] = None,\n passphrase: Optional[str] = None,\n **kwargs\n ) -> Optional[PrivateKey]:\n cast_key = partial(cast_private_key, key_format=key_format, passphrase=passphrase)\n return self._process(key, description=description, default=default, cast=cast_key,type=PrivateKey, **kwargs)", "def serializePrivateKey(private_key):\n\treturn private_key.private_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PrivateFormat.PKCS8,\n\t\tencryption_algorithm=serialization.NoEncryption()\n\t)", "def rsa_private_key(ctx, key_size=\"4096\"):\n rsa_key_size = int(key_size)\n\n key = rsa.generate_private_key(public_exponent=65537, key_size=rsa_key_size, backend=default_backend())\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )", "def get_key_from_keyring(self):\n private_key = keyring.get_password(self.keyring_service_name, \"private_key\")\n\n if private_key is not None:\n return base64.b64decode(private_key)\n else:\n return None", "def rsa_file_to_privatekey(filename):\r\n fileobject = file(filename,'r')\r\n privatekeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_privatekey(privatekeystring)", "def _unarmor_pem(data, password=None):\n\n object_type, headers, der_bytes = unarmor(data)\n\n type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n armor_type = re.match(type_regex, object_type)\n if not armor_type:\n raise ValueError(pretty_message(\n '''\n data does 
not seem to contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n pem_header = armor_type.group(1)\n\n data = data.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n algo = armor_type.group(2).lower()\n return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))\n\n key_type = pem_header.lower()\n algo = None\n if key_type == 'encrypted private key':\n key_type = 'private key'\n elif key_type == 'rsa public key':\n key_type = 'public key'\n algo = 'rsa'\n\n return (key_type, algo, der_bytes)", "def export_private_key(self, private_key):\n error = vscf_error_t()\n result = self._lib_vscf_ecc.vscf_ecc_export_private_key(self.ctx, private_key.c_impl, error)\n VscfStatus.handle_status(error.status)\n instance = RawPrivateKey.take_c_ctx(result)\n return instance", "def load_private_key_der(self, private_key_der):\n return self.load_private_key(SigningKey.from_der(private_key_der))", "def decrypt_using_private_key(message):\n public_key_path = os.path.join('keys', 'private.key')\n with open(public_key_path, 'rb') as file:\n private_key = RSA.importKey(file.read())\n\n cipher = PKCS1_OAEP.new(private_key)\n encrypted = cipher.decrypt(message)\n return encrypted.hex()", "def decrypt(s):\n if s is None:\n return None\n else:\n # try:\n enc_value = ast.literal_eval(s)\n private_key = serialization.load_pem_private_key(\n pkey.encode('utf-8'),\n password=None,\n backend=default_backend()\n )\n\n dec = private_key.decrypt(\n enc_value,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return dec.decode()", "def __init__(self, private_key):\n if private_key:\n if isinstance(private_key, str): # base58 encoded string\n self.private_key = PrivateKey.from_b58check(private_key)\n else:\n self.private_key = private_key\n self.public_key = self.private_key.public_key\n else:\n self.private_key = None\n self.public_key = None", "def test_get_private_key(self):\n\n expected = self.pem_private_key\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n actual = encryptor.get_private_key()\n\n self.assertEqual(expected, actual)", "def _try_load_ca_private_key(path):\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, open(path, 'rb').read())\n if pkey.bits() < 2048:\n raise ValueError(\"I'm sorry Dave, I can't let you use a small \"\n \"RSA key.\")\n pkey.check()\n return pkey", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {'privateExponent': util.Decode(rsa['privateExponent']),\n 'primeP': util.Decode(rsa['primeP']),\n 'primeQ': util.Decode(rsa['primeQ']),\n 'primeExponentP': util.Decode(rsa['primeExponentP']),\n 'primeExponentQ': util.Decode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Decode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "async def retrieve_private_key(self) -> Tuple[str, str]:\n\n filename, file_path = random.choice(self._private_keys)\n async with aiofiles.open(file_path, mode='r') as 
file:\n private_key = await file.read()\n return private_key, self._create_public_key_identifier(filename)", "def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")", "def load_private_key_pem(self, private_key_pem):\n return self.load_private_key(SigningKey.from_pem(private_key_pem))", "def read_private_key_file(pkey_file,\n pkey_password=None,\n key_type=None,\n logger=None):\n ssh_pkey = None\n key_types = (paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey)\n if hasattr(paramiko, 'Ed25519Key'):\n # NOQA: new in paramiko>=2.2: http://docs.paramiko.org/en/stable/api/keys.html#module-paramiko.ed25519key\n key_types += (paramiko.Ed25519Key, )\n for pkey_class in (key_type,) if key_type else key_types:\n try:\n ssh_pkey = pkey_class.from_private_key_file(\n pkey_file,\n password=pkey_password\n )\n if logger:\n logger.debug('Private key file ({0}, {1}) successfully '\n 'loaded'.format(pkey_file, pkey_class))\n break\n except paramiko.PasswordRequiredException:\n if logger:\n logger.error('Password is required for key {0}'\n .format(pkey_file))\n break\n except paramiko.SSHException:\n if logger:\n logger.debug('Private key file ({0}) could not be loaded '\n 'as type {1} or bad password'\n .format(pkey_file, pkey_class))\n return ssh_pkey", "def get_private_key_pem( pkey_path ):\n \n # get the OpenCloud private key \n observer_pkey = syndicate_storage.read_private_key( pkey_path )\n if observer_pkey is None:\n logger.error(\"Failed to load Observer private key\")\n return None\n \n observer_pkey_pem = observer_pkey.exportKey()\n \n return observer_pkey_pem", "def get_private_key(self, address58: str) -> 'EllipticCurvePrivateKey':\n return self.keys[address58]", "def rsa_decrypt(data, rsa_priv_key_str):\r\n key = RSA.importKey(rsa_priv_key_str)\r\n cipher = PKCS1_OAEP.new(key)\r\n return cipher.decrypt(data)", "def kem_decapsulate(self, encapsulated_key, private_key):\n d_encapsulated_key = Data(encapsulated_key)\n shared_key = Buffer(self.kem_shared_key_len(key=private_key))\n status = self._lib_vscf_ecc.vscf_ecc_kem_decapsulate(self.ctx, d_encapsulated_key.data, private_key.c_impl, shared_key.c_buffer)\n VscfStatus.handle_status(status)\n return shared_key.get_bytes()", "def public_from_private(self, private_key):", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {\n 'privateExponent': util.Base64WSDecode(rsa['privateExponent']),\n 'primeP': util.Base64WSDecode(rsa['primeP']),\n 'primeQ': util.Base64WSDecode(rsa['primeQ']),\n 'primeExponentP': util.Base64WSDecode(rsa['primeExponentP']),\n 'primeExponentQ': util.Base64WSDecode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Base64WSDecode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "def test_rsa_key(self):\n key1 = generate_private_key(u'rsa')\n self.assertIsInstance(key1,rsa.RSAPrivateKey)\n key2 = generate_private_key(u'rsa')\n self.assertIsInstance(key2, 
rsa.RSAPrivateKey)\n self.assertNotEqual(\n key1.public_key().public_numbers(),\n key2.public_key().public_numbers()\n )", "def deserializePrivateKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_private_key(string, password = None , backend = bc)", "def _load_private_key(self, filename, keytype=None):\n type_map = {\n 'dsa': ssh.DSSKey,\n 'rsa': ssh.RSAKey}\n\n if keytype is None:\n with open(filename, 'rb') as k:\n keydata = k.read()\n \n m = re.search(\"BEGIN (.*?) PRIVATE KEY\", keydata)\n if m:\n keytype = m.group(1)\n\n keycls = type_map.get(keytype.lower(), 'dsa')\n\n try:\n key = keycls.from_private_key_file(filename)\n log.debug(\"Loaded key '%s' without password.\", filename)\n except ssh.PasswordRequiredException:\n passphrase = self.config.get('passphrase')\n \n if callable(passphrase):\n passphrase = passphrase(filename,\n self.config.get('remote_host', 'localhost'),\n self.config.get('username', getpass.getuser()))\n if passphrase is None:\n return\n\n if not passphrase:\n passphrase = getpass.getpass(\"Key passphrase: \")\n \n key = keycls.from_private_key_file(filename, passphrase)\n\n return key", "def gen_priv_key(key_size: int = 2048) -> rsa.RSAPrivateKey:\n return rsa.generate_private_key(public_exponent=65537, key_size=key_size)", "def generate_rsa_private_key(key_size: int = 2048, exponent: int = 65537) -> RSAPrivateKeyWithSerialization:\n return rsa.generate_private_key(backend=default_backend(), public_exponent=exponent, key_size=key_size)", "def mk_keyobj_from_private_key_der(self, derdat):\n self.private_key_obj = serialization.load_der_private_key(derdat, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def test_set_private_key(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption(private_key=self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. 
In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def get_private_key():\n if not os.path.exists(_private_key_path):\n return None\n\n try:\n with open(_private_key_path) as secret_file:\n return secret_file.read()\n\n except Exception as exc:\n log.error(f'Could not read private key.\\n{exc}')\n traceback.print_exc(file=sys.stderr)", "def generate_private_key(self):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key generation.\")\n return self.load_private_key(SigningKey.generate(curve=self.curve))", "def get_private_key(self):\n return self._private_key", "def load_private(file):\n with open(file, \"rb\") as pemfile:\n key = jwk.JWK.from_pem(pemfile.read())\n\n logging.info('Loaded private key from {}'.format(file))\n return key", "def fromPrivkey(cls, ecdsaPrivkey):\n rawPrivkey = cls.PRIVATE_KEY_PREFIX + ecdsaPrivkey.to_string()\n privkey = b2a_hashed_base58(rawPrivkey)\n\n ecdsaPubkey = ecdsaPrivkey.get_verifying_key()\n rawPubkey = cls.PUBLIC_KEY_PREFIX + hash160(\n \"\\x04\" + ecdsaPubkey.to_string())\n pubkey = b2a_hashed_base58(rawPubkey)\n\n return cls(pubkey, privkey)", "def __decryptRSA(msg, user):\n # Load user's private key\n try:\n with open(\"%s/%s/keys/privateKey.pem\" % (USERS, user), \"rb\") as f:\n privateKey = serialization.load_pem_private_key(\n f.read(),\n password=None,\n backend=default_backend()\n )\n f.close()\n except:\n print(\"Error opening user's private key\")\n print(sys.exc_info())\n return None\n \n # Decrypt message\n return privateKey.decrypt(\n msg, \n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def 
decrypt(cypher, priv_key):\n\n if not isinstance(priv_key, key.PrivateKey):\n raise TypeError(\"You must use the private key with decrypt\")\n\n return gluechops(cypher, priv_key.d, priv_key.n, decrypt_int)", "def import_private_key(self, raw_key):\n error = vscf_error_t()\n result = self._lib_vscf_ecc.vscf_ecc_import_private_key(self.ctx, raw_key.ctx, error)\n VscfStatus.handle_status(error.status)\n instance = VscfImplTag.get_type(result)[0].take_c_ctx(cast(result, POINTER(VscfImplTag.get_type(result)[1])))\n return instance", "def decrypt(self, key, device, private_key):\n device_key = base64.b64decode(self.keys[device.id.hex])\n\n master_key = private_key_decrypt(private_key, device_key)\n\n if master_key is None:\n return\n\n return fernet_decrypt(self.values[key], master_key, self.salt)", "def test_use_privatekey_wrong_key(self, ctx_or_conn):\n key = PKey()\n key.generate_key(TYPE_RSA, 1024)\n ctx_or_conn.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n with pytest.raises(Error):\n ctx_or_conn.use_privatekey(key)", "def _rsa_keydict_to_keyobj(publickey = None, privatekey = None):\r\n if publickey is not None:\r\n if not rsa_is_valid_publickey(publickey):\r\n raise ValueError, \"Invalid public key\"\r\n \r\n if privatekey is not None: \r\n if not rsa_is_valid_privatekey(privatekey):\r\n raise ValueError, \"Invalid private key\"\r\n \r\n if publickey is None and privatekey is None:\r\n raise TypeError(\"Must provide either private or public key dictionary\")\r\n\r\n if publickey is None: \r\n publickey = {}\r\n if privatekey is None: \r\n privatekey = {}\r\n \r\n n = None \r\n e = None\r\n d = None\r\n p = None\r\n q = None\r\n \r\n if 'd' in privatekey: \r\n d = long(privatekey['d'])\r\n if 'p' in privatekey: \r\n p = long(privatekey['p'])\r\n if 'q' in privatekey: \r\n q = long(privatekey['q']) \r\n \r\n if 'n' in publickey: \r\n n = long(publickey['n'])\r\n # n is needed for a private key even thought it is not\r\n # part of the standard public key dictionary.\r\n else: n = p*q \r\n if 'e' in publickey: \r\n e = long(publickey['e'])\r\n \r\n rsa_implementation = RSA_RSAImplementation()\r\n rsa_key = rsa_implementation.construct((n,e,d,p,q))\r\n \r\n return rsa_key", "def load_private_key(filename):\n\twith open(str(filename) + \"_key.pem\", \"rb\") as key_file:\n\t\treturn serialization.load_pem_private_key(\n\t\tkey_file.read(),\n\t\tpassword=None,\n\t\tbackend=default_backend()\n\t)", "def _rsa_key(self,private_key):\n numbers = private_key.private_numbers()\n content = WriteMessage()\n content.write_string('ssh-rsa')\n content.write_mpint(numbers.public_numbers.n)\n content.write_mpint(numbers.public_numbers.e)\n content.write_mpint(numbers.d)\n content.write_mpint(numbers.iqmp)\n content.write_mpint(numbers.p)\n content.write_mpint(numbers.q)\n return content.data", "def __init__(self, private_key):\n self._sk = ed25519.Ed25519PrivateKey.from_private_bytes(private_key.bytes)", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def _unarmor_pem_openssl_private(headers, data, password):\n\n enc_algo = None\n enc_iv_hex = None\n enc_iv = None\n\n if 'DEK-Info' in headers:\n params = headers['DEK-Info']\n if params.find(',') != -1:\n enc_algo, enc_iv_hex = params.strip().split(',')\n else:\n enc_algo = 'RC4'\n\n if not enc_algo:\n return data\n\n if 
enc_iv_hex:\n enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii'))\n enc_algo = enc_algo.lower()\n\n enc_key_length = {\n 'aes-128-cbc': 16,\n 'aes-128': 16,\n 'aes-192-cbc': 24,\n 'aes-192': 24,\n 'aes-256-cbc': 32,\n 'aes-256': 32,\n 'rc4': 16,\n 'rc4-64': 8,\n 'rc4-40': 5,\n 'rc2-64-cbc': 8,\n 'rc2-40-cbc': 5,\n 'rc2-cbc': 16,\n 'rc2': 16,\n 'des-ede3-cbc': 24,\n 'des-ede3': 24,\n 'des3': 24,\n 'des-ede-cbc': 16,\n 'des-cbc': 8,\n 'des': 8,\n }[enc_algo]\n\n enc_key = hashlib.md5(password + enc_iv[0:8]).digest()\n while enc_key_length > len(enc_key):\n enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest()\n enc_key = enc_key[0:enc_key_length]\n\n enc_algo_name = {\n 'aes-128-cbc': 'aes',\n 'aes-128': 'aes',\n 'aes-192-cbc': 'aes',\n 'aes-192': 'aes',\n 'aes-256-cbc': 'aes',\n 'aes-256': 'aes',\n 'rc4': 'rc4',\n 'rc4-64': 'rc4',\n 'rc4-40': 'rc4',\n 'rc2-64-cbc': 'rc2',\n 'rc2-40-cbc': 'rc2',\n 'rc2-cbc': 'rc2',\n 'rc2': 'rc2',\n 'des-ede3-cbc': 'tripledes',\n 'des-ede3': 'tripledes',\n 'des3': 'tripledes',\n 'des-ede-cbc': 'tripledes',\n 'des-cbc': 'des',\n 'des': 'des',\n }[enc_algo]\n decrypt_func = crypto_funcs[enc_algo_name]\n\n if enc_algo_name == 'rc4':\n return decrypt_func(enc_key, data)\n\n return decrypt_func(enc_key, data, enc_iv)", "def get_principal_pkey( user_email, observer_secret ):\n \n sp = get_principal_data( user_email )\n if sp is None:\n logger.error(\"Failed to find private key for principal %s\" % user_email )\n return None \n \n public_key_pem = sp.public_key_pem\n sealed_private_key_pem = sp.sealed_private_key\n\n # unseal\n private_key_pem = verify_and_unseal_blob(public_key_pem, observer_secret, sealed_private_key_pem)\n if private_key_pem is None:\n logger.error(\"Failed to unseal private key\")\n\n return private_key_pem", "def _decrypt_pvtkey(self, pvtkey_file: str, passphrase: str) -> str:\n\n keydata: str = None\n if pvtkey_file:\n try:\n keydata = asyncssh.public_key.read_private_key(pvtkey_file,\n passphrase)\n except Exception as e:\n self.logger.error(\n f\"ERROR: Unable to read private key file {pvtkey_file}\"\n f\"for jump host due to {str(e)}\")\n\n return keydata", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. 
Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def ed25519_private_key(ctx):\n\n key = ed25519.Ed25519PrivateKey.generate()\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )", "def private_key(self):", "def read_chain_pair(private_key, certificates):\n with open(private_key, 'rb') as f:\n private_key = f.read()\n with open(certificates, 'rb') as f:\n certificates = f.read()\n return (private_key, certificates)", "def _get_pubkey_from_pem_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_pem_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def decrypt(private_key, ciphertext):\n if len(ciphertext) < 512 + 16:\n return None\n msg_header = ciphertext[:512]\n msg_iv = ciphertext[512:512+16]\n msg_body = ciphertext[512+16:]\n try:\n symmetric_key = PKCS1_OAEP.new(private_key).decrypt(msg_header)\n except ValueError:\n return None\n if len(symmetric_key) != 32:\n return None\n return AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).decrypt(msg_body)", "def build(self, signing_private_key):\n\n is_oscrypto = isinstance(signing_private_key, asymmetric.PrivateKey)\n if not isinstance(signing_private_key, keys.PrivateKeyInfo) and not is_oscrypto:\n raise TypeError(_pretty_message(\n '''\n signing_private_key must be an instance of\n asn1crypto.keys.PrivateKeyInfo or\n oscrypto.asymmetric.PrivateKey, not %s\n ''',\n _type_name(signing_private_key)\n ))\n\n if self._self_signed is not True and self._issuer is None:\n raise ValueError(_pretty_message(\n '''\n Certificate must be self-signed, or an issuer must be specified\n '''\n ))\n\n if self._self_signed:\n self._issuer = self._subject\n\n if self._serial_number is None:\n time_part = int_to_bytes(int(time.time()))\n random_part = util.rand_bytes(4)\n self._serial_number = int_from_bytes(time_part + random_part)\n\n if self._begin_date is None:\n self._begin_date = datetime.now(timezone.utc)\n\n if self._end_date is None:\n self._end_date = self._begin_date + timedelta(365)\n\n if not self.ca:\n for ca_only_extension in set(['policy_mappings', 'policy_constraints', 'inhibit_any_policy']):\n if ca_only_extension in self._other_extensions:\n raise ValueError(_pretty_message(\n '''\n Extension %s is only valid for CA certificates\n ''',\n ca_only_extension\n ))\n\n signature_algo = signing_private_key.algorithm\n if signature_algo == 'ec':\n signature_algo = 'ecdsa'\n\n signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)\n\n # RFC 3280 4.1.2.5\n def _make_validity_time(dt):\n if dt < datetime(2050, 1, 1, tzinfo=timezone.utc):\n value = x509.Time(name='utc_time', value=dt)\n else:\n value = x509.Time(name='general_time', value=dt)\n\n return value\n\n def _make_extension(name, value):\n return {\n 'extn_id': name,\n 'critical': self._determine_critical(name),\n 'extn_value': value\n }\n\n extensions = []\n for name in sorted(self._special_extensions):\n value = getattr(self, '_%s' % name)\n if name == 'ocsp_no_check':\n value = core.Null() if value else None\n if value is not None:\n extensions.append(_make_extension(name, value))\n\n for name in sorted(self._other_extensions.keys()):\n extensions.append(_make_extension(name, self._other_extensions[name]))\n\n tbs_cert = 
x509.TbsCertificate({\n 'version': 'v3',\n 'serial_number': self._serial_number,\n 'signature': {\n 'algorithm': signature_algorithm_id\n },\n 'issuer': self._issuer,\n 'validity': {\n 'not_before': _make_validity_time(self._begin_date),\n 'not_after': _make_validity_time(self._end_date),\n },\n 'subject': self._subject,\n 'subject_public_key_info': self._subject_public_key,\n 'extensions': extensions\n })\n\n if signing_private_key.algorithm == 'rsa':\n sign_func = asymmetric.rsa_pkcs1v15_sign\n elif signing_private_key.algorithm == 'dsa':\n sign_func = asymmetric.dsa_sign\n elif signing_private_key.algorithm == 'ec':\n sign_func = asymmetric.ecdsa_sign\n\n if not is_oscrypto:\n signing_private_key = asymmetric.load_private_key(signing_private_key)\n signature = sign_func(signing_private_key, tbs_cert.dump(), self._hash_algo)\n\n return x509.Certificate({\n 'tbs_certificate': tbs_cert,\n 'signature_algorithm': {\n 'algorithm': signature_algorithm_id\n },\n 'signature_value': signature\n })", "def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def _generate_ca_private_key(path):\n DEFAULT_KEY_ALG = crypto.TYPE_RSA\n DEFAULT_KEY_BITS = 2048\n\n pkey = crypto.PKey()\n pkey.generate_key(DEFAULT_KEY_ALG, DEFAULT_KEY_BITS)\n data = crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)\n open(path, 'wb').write(data)\n\n return pkey", "def decrypt(data, private_key):\r\n\r\n # Retrieve session key, tag, ciphertext and nonce from file\r\n enc_session_key, nonce, tag, ciphertext = \\\r\n [ file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1) ]\r\n\r\n\r\n # Decrypt the session key\r\n session_key = cipher_rsa.decrypt(enc_session_key)\r\n\r\n # Decrypt the data with the AES session key\r\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\r\n data = cipher_aes.decrypt_and_verify(ciphertext, tag)\r\n\r\n return data", "def tls_certificate_private_key_pem_path(tls_certificate):\n with tls_certificate.private_key_pem.tempfile() as cert_key_pem:\n yield cert_key_pem", "def load_private_key(file_path: str, password: bytes = None,\n encoding: Encoding = None) -> PrivateKey:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(key_data: bytes) -> PrivateKey:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param key_data: given private keys data\n :return: loaded private key\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())\n\n return generic_load(file_path, solve)", "def rsa_string_to_privatekey(mystr):\r\n if len(mystr.split()) != 3:\r\n raise ValueError, \"Invalid private key string\"\r\n \r\n return {'d':long(mystr.split()[0]), 'p':long(mystr.split()[1]), 'q':long(mystr.split()[2])}", "def get_ca_private_key():\n return _try_load_ca_private_key(cfg.ca_private_key_path())", "def private_key_to_address(private_key: PrivateKeyType) -> Address:\n privkey = PrivateKey(private_key)\n return public_key_to_address(privkey.public_key)", "def get_private_key(self) -> str:\n\t\treturn self._privateKey", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def 
private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")", "def get_new_key() -> rsa.RSAPrivateKeyWithSerialization:\n\n return rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048\n )", "def find_private_key(self):\n\t\tp, q = self.find_hidden_primes()\n\t\tself.private_key = self.calculate_private_key(p, q)\n\t\treturn self.private_key", "def cred_extract(self, rp_id: str, credential_id: bytes) -> Optional[ec.EllipticCurvePrivateKey]:\n rp_id_hash = sha256(rp_id.encode())\n\n aesgcm = AESGCM(self.master_key)\n aad = rp_id_hash\n nonce = sha256(aad + self.master_key)[4:16]\n\n try:\n data = aesgcm.decrypt(nonce, credential_id, aad)\n return ec.derive_private_key(int.from_bytes(data, 'big'), ec.SECP256R1(), default_backend())\n except cryptography.exceptions.InvalidTag:\n return None", "def rsa_privatekey_to_string(privatekey):\r\n if not rsa_is_valid_privatekey(privatekey):\r\n raise ValueError, \"Invalid private key\"\r\n\r\n return str(privatekey['d'])+\" \"+str(privatekey['p'])+\" \"+str(privatekey['q'])" ]
[ "0.7709716", "0.7091911", "0.6661885", "0.6650223", "0.6593615", "0.6553231", "0.64998823", "0.6498649", "0.6429571", "0.64050627", "0.6381211", "0.6249152", "0.6246417", "0.6217567", "0.62138116", "0.6204138", "0.6198371", "0.6193991", "0.6141875", "0.6141632", "0.6117683", "0.6094897", "0.60794103", "0.6072148", "0.6068357", "0.6056227", "0.6046553", "0.6008377", "0.60045433", "0.6002406", "0.59921217", "0.59703594", "0.59275043", "0.5867883", "0.5847706", "0.5788413", "0.57697606", "0.5767147", "0.5751226", "0.5741767", "0.5741147", "0.5731536", "0.5729223", "0.57161385", "0.5709488", "0.570611", "0.5705071", "0.569006", "0.5687509", "0.5681143", "0.56789607", "0.5677262", "0.5662733", "0.56562066", "0.5654285", "0.5633819", "0.5620194", "0.56182194", "0.5612803", "0.5593423", "0.55894995", "0.5587099", "0.55866957", "0.55713224", "0.5555852", "0.5544794", "0.5539685", "0.55256796", "0.55162877", "0.55075955", "0.54940015", "0.5492375", "0.5489541", "0.5475332", "0.54742223", "0.54689074", "0.5453934", "0.5432456", "0.5423313", "0.5405679", "0.5405473", "0.54033726", "0.539143", "0.53793854", "0.53675854", "0.5366154", "0.5365795", "0.5362358", "0.53483987", "0.53461677", "0.5334203", "0.5334203", "0.5334203", "0.5334203", "0.5334203", "0.5334203", "0.53233474", "0.5321003", "0.5318527", "0.53122556" ]
0.7861159
0
Unwraps a public key into an asn1crypto.keys.RSAPublicKey, asn1crypto.core.Integer (for DSA) or asn1crypto.keys.ECPointBitString object
def unwrap(self):\n    if self.algorithm == 'ec':\n        return self.asn1['public_key']\n    return self.asn1['public_key'].parsed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def get_pub_rsa_key(pub_key):\n return RSA.importKey(pub_key)", "def load_pub_key_bytes(bs: bytes) -> rsa.RSAPublicKey:\n k = serialization.load_pem_public_key(bs)\n assert isinstance(k, rsa.RSAPublicKey)\n return k", "def _wrap_publickey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PublicKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize public key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_public_key(der, backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def parse_public(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, algo, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a public key or\n certificate, but rather a private key\n '''\n ))\n\n # When a public key returning from _unarmor_pem has a known algorithm\n # of RSA, that means 
the DER structure is of the type RSAPublicKey, so\n # we need to wrap it in the PublicKeyInfo structure.\n if algo == 'rsa':\n return PublicKeyInfo.wrap(data, 'rsa')\n\n if key_type is None or key_type == 'public key':\n try:\n pki = PublicKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PublicKeyInfo\n\n try:\n rpk = RSAPublicKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n rpk.native\n return PublicKeyInfo.wrap(rpk, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPublicKey\n\n if key_type is None or key_type == 'certificate':\n try:\n parsed_cert = Certificate.load(data)\n key_info = parsed_cert['tbs_certificate']['subject_public_key_info']\n return key_info\n except (ValueError):\n pass # Data was not a cert\n\n raise ValueError('The data specified does not appear to be a known public key or certificate format')", "def rsa_public_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PublicKeyInfo())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid public key encoding.\")\n\n return decoded_key[\"publicKey\"].asOctets()", "def serializePublicKey(public_key):\n\treturn public_key.public_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PublicFormat.SubjectPublicKeyInfo\n\t)", "def parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)", "def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()", "def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:\n key = ECKey(privkey)\n return key.get_public_key(compressed)", "def generate_rsa_public_key(private_key: RSAPrivateKeyWithSerialization) -> RSAPublicKey:\n return private_key.public_key()", "def extractPublicKey(cert):\n pk = cert.get_pubkey()\n\n b = _util.binding\n l = b.lib\n ffi = b.ffi\n rsa = l.EVP_PKEY_get1_RSA(pk._pkey)\n buf = ffi.new(\"unsigned char **\")\n length = l.i2d_RSA_PUBKEY(rsa, buf)\n pk = ffi.buffer(buf[0], length)[:]\n ffi.gc(buf[0], l.OPENSSL_free)\n return pk", "def get_pub_key(priv_key: rsa.RSAPrivateKey) -> rsa.RSAPublicKey:\n return priv_key.public_key()", "def _get_keyidv2(pubkey: SupportedKeyTypes) -> int:\n if isinstance(pubkey, RSAPublicKey):\n fmt = serialization.PublicFormat.PKCS1\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.DER, format=fmt)\n elif isinstance(pubkey, EllipticCurvePublicKey):\n fmt = serialization.PublicFormat.UncompressedPoint\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.X962, format=fmt)\n else:\n raise UnsupportedAlgorithm(f\"Unsupported public key type {type(pubkey)}\")\n\n default_be = backends.default_backend()\n digest = hashes.Hash(hashes.SHA1(), backend=default_be)\n digest.update(pubbytes)\n keydigest = digest.finalize()\n return int.from_bytes(keydigest[16:], \"big\")", "def Read(key):\n rsa = json.loads(key)\n params = {\n 'modulus': util.Base64WSDecode(rsa['modulus']),\n 'publicExponent': util.Base64WSDecode(rsa['publicExponent'])\n }\n\n pubkey = RSA.construct((util.BytesToLong(params['modulus']),\n util.BytesToLong(params['publicExponent'])))\n return RsaPublicKey(params, pubkey, rsa['size'])", "def _get_pubkey_from_pem_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_pem_public_key(filedata, backend=backend), None\n except Exception:\n return None, None", "def _createPublicKey(key):\r\n if not 
isinstance(key, RSAKey):\r\n raise AssertionError()\r\n return _createPublicRSAKey(key.n, key.e)", "def Read(key):\n rsa = json.loads(key)\n params = {'modulus' : util.Decode(rsa['modulus']),\n 'publicExponent' : util.Decode(rsa['publicExponent'])}\n\n pubkey = RSA.construct((util.BytesToLong(params['modulus']),\n util.BytesToLong(params['publicExponent'])))\n return RsaPublicKey(params, pubkey, rsa['size'])", "def rsa_public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: RSA public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|rsapublic'\"\n )\n\n public_key(ctx)", "def _get_pubkey_from_der_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_der_public_key(filedata, backend=backend), None\n except Exception:\n return None, None", "def public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|publickey'\"\n )\n\n data_dec = ctx.data\n if ctx.ref_encoding == \"base64\":\n data_dec = base64.b64decode(data_dec).decode()\n\n private_key = serialization.load_pem_private_key(\n data_dec.encode(), password=None, backend=default_backend()\n )\n public_key = private_key.public_key()\n\n ctx.data = str(\n public_key.public_bytes(\n encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo\n ),\n \"UTF-8\",\n )", "def rsa_publickey_to_string(publickey):\r\n if not rsa_is_valid_publickey(publickey):\r\n raise ValueError, \"Invalid public key\"\r\n\r\n return str(publickey['e'])+\" \"+str(publickey['n'])", "def deserializePublicKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_public_key(string , backend = bc)", "def rsa_string_to_publickey(mystr):\r\n if len(mystr.split()) != 2:\r\n raise ValueError, \"Invalid public key string\"\r\n \r\n return {'e':long(mystr.split()[0]), 'n':long(mystr.split()[1])}", "def validate_handshake_public_key(cls, public_key: bytes) -> None:\n ...", "def export_public_key(self, public_key):\n error = vscf_error_t()\n result = self._lib_vscf_ecc.vscf_ecc_export_public_key(self.ctx, public_key.c_impl, error)\n VscfStatus.handle_status(error.status)\n instance = RawPublicKey.take_c_ctx(result)\n return instance", "def test_public_key_rsa(self):\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n x509 = crypto.load_certificate(PEM, cert)\n self.assertEqual(utils.public_key_type(x509), c.KEY_RSA)", "def solve(key_data: bytes) -> PublicKey:\n return { # type: ignore\n Encoding.PEM: load_pem_public_key,\n Encoding.DER: load_der_public_key\n }[real_encoding](key_data, default_backend())", "def rsa_file_to_publickey(filename):\r\n fileobject = file(filename,'r')\r\n publickeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_publickey(publickeystring)", 
"def get_public_key_in_pem(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public", "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def get_pub_key(self):\n return \"RSA {0}\".format(self._cert.get_pubkey().bits)", "def extractPubKey(script):\n pubkey = extractCompressedPubKey(script)\n if pubkey:\n return pubkey\n return extractUncompressedPubKey(script)", "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def public_key_to_address(public_key: PublicKey) -> Address:\n key_bytes = public_key.format(compressed=False)\n return Address(keccak(key_bytes[1:])[-20:])", "def _rsa_keydict_to_keyobj(publickey = None, privatekey = None):\r\n if publickey is not None:\r\n if not rsa_is_valid_publickey(publickey):\r\n raise ValueError, \"Invalid public key\"\r\n \r\n if privatekey is not None: \r\n if not rsa_is_valid_privatekey(privatekey):\r\n raise ValueError, \"Invalid private key\"\r\n \r\n if publickey is None and privatekey is None:\r\n raise TypeError(\"Must provide either private or public key dictionary\")\r\n\r\n if publickey is None: \r\n publickey = {}\r\n if privatekey is None: \r\n privatekey = {}\r\n \r\n n = None \r\n e = None\r\n d = None\r\n p = None\r\n q = None\r\n \r\n if 'd' in privatekey: \r\n d = long(privatekey['d'])\r\n if 'p' in privatekey: \r\n p = long(privatekey['p'])\r\n if 'q' in privatekey: \r\n q = long(privatekey['q']) \r\n \r\n if 'n' in publickey: \r\n n = long(publickey['n'])\r\n # n is needed for a private key even thought it is not\r\n # part of the standard public key dictionary.\r\n else: n = p*q \r\n if 'e' in publickey: \r\n e = long(publickey['e'])\r\n \r\n rsa_implementation = RSA_RSAImplementation()\r\n rsa_key = rsa_implementation.construct((n,e,d,p,q))\r\n \r\n return rsa_key", "def __init__(self, pubkey, e=65537):\n if isinstance(pubkey, int):\n self.key = RSA.RsaKey(n=pubkey, e=e)\n\n else:\n if not isinstance(pubkey, str):\n raise ValueError('pubkey must be str or int.')\n\n if '----' in pubkey:\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)\n else:\n if pubkey == pubkey.lower():\n pubkey = int(pubkey, 16)\n self.key = RSA.RsaKey(n=pubkey, e=e)\n else:\n pubkey = '-----BEGIN PUBLIC KEY-----\\n' + pubkey + '\\n-----END PUBLIC KEY-----'\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)", "def private_key_to_public_key(private_key):\n\tpk = PrivateKey().fromString(bytes.fromhex(private_key))\n\treturn '04' + pk.publicKey().toString().hex().upper()", "def privatekey_to_publickey(private_key_bin: bytes) -> bytes:\n if not ishash(private_key_bin):\n raise ValueError('private_key_bin format mismatch. 
maybe hex encoded?')\n private_key = PrivateKey(private_key_bin)\n return private_key.public_key.format(compressed=False)", "def parse_key(key: RSA.RsaKey) -> str:\n\n return binascii.hexlify(key.exportKey(\n format='DER')).decode('ascii')", "def rsa_public_key(self, modulus: int, exponent: int) -> rsa.RSAPublicKey:\n return rsa.RSAPublicNumbers(exponent, modulus).public_key(default_backend())", "def rsa_verify(cypher, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions. \r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.verify)", "def get_public_key_in_der(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public", "def get_pubkey_from_file(filename: str) -> Tuple[Optional[SupportedKeyTypes], Optional[int]]:\n with open(filename, \"rb\") as fobj:\n filedata = fobj.read()\n pubkey, keyidv2 = get_pubkey(filedata)\n if pubkey:\n if not isinstance(pubkey, (RSAPublicKey, EllipticCurvePublicKey)):\n raise ValueError(f\"Unsupported key type {type(pubkey).__name__}\")\n return pubkey, keyidv2\n\n return None, None", "def public_key(self) -> PublicKey:\n pass", "def convert_public_key_to_ecdsa(self, public_key):\n return PublicKey.fromPem('\\n-----BEGIN PUBLIC KEY-----\\n'+public_key+'\\n-----END PUBLIC KEY-----\\n')", "def load_received_public_key_bytes(self, public_key_str):\n return self.load_received_public_key(\n VerifyingKey.from_string(public_key_str, self.curve))", "def import_public_key(self, raw_key):\n error = vscf_error_t()\n result = self._lib_vscf_ecc.vscf_ecc_import_public_key(self.ctx, raw_key.ctx, error)\n VscfStatus.handle_status(error.status)\n instance = VscfImplTag.get_type(result)[0].take_c_ctx(cast(result, POINTER(VscfImplTag.get_type(result)[1])))\n return instance", "def __init__(self, public_key):\n self._pk = ed25519.Ed25519PublicKey.from_public_bytes(public_key.bytes)", "def __init__(self, rsa_key):\r\n if isinstance(rsa_key, tuple):\r\n self.keypair = Crypto.PublicKey.RSA.construct(rsa_key)\r\n else:\r\n self._InitFromString(rsa_key)", "def load_received_public_key_pem(self, public_key_pem):\n return self.load_received_public_key(VerifyingKey.from_pem(public_key_pem))", "def get_pubkey(pem):\n der = ssl.PEM_cert_to_DER_cert(pem)\n\n # Extract subjectPublicKeyInfo field from X.509 certificate (see RFC3280)\n cert = DerSequence()\n cert.decode(der)\n tbsCertificate = DerSequence()\n tbsCertificate.decode(cert[0])\n subjectPublicKeyInfo = tbsCertificate[6]\n\n return subjectPublicKeyInfo", "def _LoadSshPublicKey(ssh_public_key_path):\n key_path = os.path.expanduser(ssh_public_key_path)\n if not os.path.exists(key_path):\n raise errors.DriverError(\n \"SSH public key file %s does not exist.\" % key_path)\n\n with open(key_path) as f:\n rsa = f.read()\n rsa = rsa.strip() if rsa else rsa\n utils.VerifyRsaPubKey(rsa)\n return rsa", "def rsa_public_key_pkcs1_to_pkcs8(pkcs1_key):\n algorithm = RsaAlgorithmIdentifier()\n algorithm[\"rsaEncryption\"] = RSA_ENCRYPTION_ASN1_OID\n\n pkcs8_key = PublicKeyInfo()\n pkcs8_key[\"algorithm\"] = algorithm\n pkcs8_key[\"publicKey\"] = univ.BitString.fromOctetString(pkcs1_key)\n\n return encoder.encode(pkcs8_key)", "def test_rsa_key(self):\n key1 = generate_private_key(u'rsa')\n self.assertIsInstance(key1,rsa.RSAPrivateKey)\n key2 
= generate_private_key(u'rsa')\n self.assertIsInstance(key2, rsa.RSAPrivateKey)\n self.assertNotEqual(\n key1.public_key().public_numbers(),\n key2.public_key().public_numbers()\n )", "def strip_begin_end_public_key(key):\n return key.replace(\"\\n\", \"\")\\\n .replace(\"-----BEGIN PUBLIC KEY-----\", \"\").replace(\n \"-----END PUBLIC KEY-----\", \"\")", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def import_key(key: str) -> RSA.RsaKey:\n\n return RSA.importKey(binascii.unhexlify(key))", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def save_rsa_public_key(public_key: RSAPublicKey, file_path: str, encoding: Encoding = Encoding.PEM) -> None:\n pem_data = public_key.public_bytes(encoding, serialization.PublicFormat.PKCS1)\n with open(file_path, 'wb') as f:\n f.write(pem_data)", "def key_from_keybase(username, fingerprint=None):\n url = keybase_lookup_url(username)\n resp = requests.get(url)\n if resp.status_code == 200:\n j_resp = json.loads(polite_string(resp.content))\n if 'them' in j_resp and len(j_resp['them']) == 1:\n kb_obj = j_resp['them'][0]\n if fingerprint:\n return fingerprint_from_keybase(fingerprint, kb_obj)\n else:\n if 'public_keys' in kb_obj \\\n and 'pgp_public_keys' in kb_obj['public_keys']:\n key = kb_obj['public_keys']['primary']\n return massage_key(key)\n\n return None", "def load_received_public_key(self, public_key):\n if not self.curve:\n self.curve = public_key.curve\n if self.curve != public_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.public_key = public_key", "def get_public_key(self):\n return self.private_key.get_verifying_key()", "def vscf_raw_private_key_extract_public_key(self, ctx):\n vscf_raw_private_key_extract_public_key = self._lib.vscf_raw_private_key_extract_public_key\n vscf_raw_private_key_extract_public_key.argtypes = [POINTER(vscf_raw_private_key_t)]\n vscf_raw_private_key_extract_public_key.restype = POINTER(vscf_impl_t)\n return vscf_raw_private_key_extract_public_key(ctx)", "def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return fingerprint_public_key_blob(blob)", "def PublicKey(self) -> _n_9_t_1:", "def PublicKey(self) -> _n_9_t_1:", "def get_key_from_blob(blob):\n keys = pgpy.PGPKey.from_blob(blob)\n logging.debug(keys)\n return keys[0]", "def public_key(self):\n return PublicKey(self._sk.public_key().public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw))", "def get_pubkey(filedata: bytes) -> Tuple[Optional[SupportedKeyTypes], Optional[int]]:\n default_be = backends.default_backend()\n for func in [\n _get_pubkey_from_der_x509_certificate,\n _get_pubkey_from_pem_x509_certificate,\n _get_pubkey_from_der_public_key,\n _get_pubkey_from_pem_public_key,\n _get_pubkey_from_der_private_key,\n _get_pubkey_from_pem_private_key,\n ]:\n pubkey, keyidv2 = func(filedata, default_be)\n if pubkey:\n if not isinstance(pubkey, (RSAPublicKey, EllipticCurvePublicKey)):\n raise ValueError(f\"Unsupported key type {type(pubkey).__name__}\")\n return pubkey, keyidv2\n\n return None, None", "def public_key(\n self,\n key: str,\n default: Any = undefined,\n 
description: str = None,\n key_format: Optional[EncryptionKeyFormat] = None,\n **kwargs\n ) -> Optional[PublicKey]:\n cast_key = partial(cast_public_key, key_format=key_format)\n return self._process(key, description=description, default=default, cast=cast_key, type=PublicKey, **kwargs)", "def _get_pubkey_from_pem_x509_certificate(filedata: bytes, backend: Any) -> Tuple[Any, Optional[int]]:\n try:\n cert = x509.load_pem_x509_certificate(filedata, backend=backend)\n return cert.public_key(), _get_keyidv2_from_cert(cert)\n except Exception:\n return None, None", "def __init__(self, public_key=None):\n self.public_key = self.convert_public_key_to_ecdsa(public_key) if public_key else public_key", "def extractUncompressedPubKey(script):\n # A pay-to-compressed-pubkey script is of the form:\n # OP_DATA_65 <65-byte uncompressed pubkey> OP_CHECKSIG\n\n # All non-hybrid uncompressed secp256k1 public keys must start with 0x04.\n if (\n len(script) == 67\n and script[66] == opcode.OP_CHECKSIG\n and script[0] == opcode.OP_DATA_65\n and script[1] == 0x04\n ):\n\n return script[1:66]\n return None", "def get_key_object(self):\n key_type, data = self.key_data()\n data = base64.b64decode(data)\n\n if key_type == \"ssh-rsa\":\n key = rsakey.RSAKey(data=data)\n elif key_type == \"ssh-dss\":\n key = dsskey.DSSKey(data=data)\n else:\n raise Exception(\"Invalid key type\")\n\n return key", "def get_public_key(self):\n# _log.debug(\"get_public_key\")\n certpath, cert, certstr = self.get_own_cert()\n try:\n cert = load_pem_x509_certificate(certstr, default_backend())\n except Exception as err:\n _log.error(\"Failed to load X509 certificate from PEM, err={}\".format(err))\n raise\n return cert.public_key()", "def _get_pubkey_from_pem_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_pem_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def get_public_key_msg(\n msg: GetPublicKeyMessage,\n node: DomainInterface,\n verify_key: VerifyKey,\n) -> GetPublicKeyResponse:\n keys = node.oblv_keys.get()\n public_key_str = encodebytes(keys.public_key).decode(\"UTF-8\").replace(\"\\n\", \"\")\n\n return GetPublicKeyResponse(address=msg.reply_to, response=public_key_str)", "def load_received_public_key_der(self, public_key_der):\n return self.load_received_public_key(VerifyingKey.from_der(public_key_der))", "def text2PublicKey(text:str):\n return RSA.importKey(b58decode(text))", "def load_public_key(filename):\n\twith open(str(filename) + \"_pub_key.pem\", \"rb\") as key_file:\n\t\treturn serialization.load_pem_public_key(\n\t\tkey_file.read(),\n\t\tbackend=default_backend()\n\t)", "def public_from_private(self, private_key):", "def load_rsa_key(key, key_type, key_encoding):\n # (bytes, EncryptionKeyType, KeyEncodingType) -> Any\n # narrow down the output type\n # https://github.com/aws/aws-dynamodb-encryption-python/issues/66\n try:\n loader = _RSA_KEY_LOADING[key_type][key_encoding]\n except KeyError:\n raise ValueError(\"Invalid key type and encoding: {} and {}\".format(key_type, key_encoding))\n\n kwargs = dict(data=key, backend=default_backend())\n if key_type is EncryptionKeyType.PRIVATE:\n kwargs[\"password\"] = None\n\n loaded_key = loader(**kwargs)\n\n if loaded_key.key_size < MinimumKeySizes.RSA.value:\n _LOGGER.warning(\"RSA keys smaller than %d bits are unsafe\", MinimumKeySizes.RSA.value)\n\n return loaded_key", "def subject_public_key(self, value):\n\n is_oscrypto = isinstance(value, 
asymmetric.PublicKey)\n if not isinstance(value, keys.PublicKeyInfo) and not is_oscrypto:\n raise TypeError(_pretty_message(\n '''\n subject_public_key must be an instance of\n asn1crypto.keys.PublicKeyInfo or oscrypto.asymmetric.PublicKey,\n not %s\n ''',\n _type_name(value)\n ))\n\n if is_oscrypto:\n value = value.asn1\n\n self._subject_public_key = value\n self._key_identifier = self._subject_public_key.sha1\n self._authority_key_identifier = None", "def wrap_rsa_key(public_key: RSAPublicKey, private_key_bytes: bytes) -> bytes:\n wrapped_key = public_key.encrypt(\n private_key_bytes,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA1()),\n algorithm=hashes.SHA1(),\n label=None,\n ),\n )\n encoded_wrapped_key = base64.b64encode(wrapped_key)\n return encoded_wrapped_key", "def get_public_key(self, kid):\n resp = self.request(self.jwks_url(), method=\"GET\")\n resp.raise_for_status()\n\n # find the proper key for the kid\n for key in resp.json()[\"keys\"]:\n if key[\"kid\"] == kid:\n return self.jwt_key_to_pem(key)\n raise DecodeError(f\"Cannot find kid={kid}\")", "def get_public_key_fingerprint(curve: object, temp_public_key: object) \\\n -> object:\n\n vk = VerifyingKey.from_string(bytes.fromhex(temp_public_key), curve=curve)\n\n uncompressed_pub_key = vk.to_string('uncompressed')\n\n pub_key_hash_fingerprint = hashlib.sha256(uncompressed_pub_key)\n\n return pub_key_hash_fingerprint.hexdigest()", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {'privateExponent': util.Decode(rsa['privateExponent']),\n 'primeP': util.Decode(rsa['primeP']),\n 'primeQ': util.Decode(rsa['primeQ']),\n 'primeExponentP': util.Decode(rsa['primeExponentP']),\n 'primeExponentQ': util.Decode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Decode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "def check_equal_rsa_pub_key(sk2_, sk_):\n pub_n = sk_.public_numbers()\n pub_n2 = sk2_.public_numbers()\n\n self.assertEqual(pub_n2.e, pub_n.e)\n self.assertEqual(pub_n2.n, pub_n.n)", "def verify(cypher, pub_key):\n\n if not isinstance(pub_key, key.PublicKey):\n raise TypeError(\"You must use the public pub_key with verify\")\n\n return gluechops(cypher, pub_key.e, pub_key.n, decrypt_int)", "def get_pub_key_bytes(priv_key: rsa.RSAPrivateKey) -> bytes:\n k = priv_key.public_key()\n return k.public_bytes(encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo)", "def load_public_key(file_path: str, encoding: Encoding = None) -> PublicKey:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(key_data: bytes) -> PublicKey:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param key_data: given public keys data\n :return: loaded public key\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_public_key,\n Encoding.DER: load_der_public_key\n }[real_encoding](key_data, default_backend())\n\n return generic_load(file_path, solve)", "def key_for_signature(self, data, sig):\n verification = self.verify(data, sig)\n return PublicKey.objects.filter(\n fingerprint=verification.fingerprint,\n profile__verified=True,\n ).first()", "def 
import_public_key_from_pem_file(filename):\n with open(filename, \"rb\") as key_file:\n public_key = serialization.load_pem_public_key(key_file.read(), backend=default_backend())\n return public_key", "def _create_rsa_key_pair(self, length, public_exponent=65537):\n self.logger.info(\n \"Generating an RSA key pair with length: {0}, and \"\n \"public_exponent: {1}\".format(\n length, public_exponent\n )\n )\n try:\n private_key = rsa.generate_private_key(\n public_exponent=public_exponent,\n key_size=length,\n backend=default_backend())\n public_key = private_key.public_key()\n\n private_bytes = private_key.private_bytes(\n serialization.Encoding.DER,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption())\n public_bytes = public_key.public_bytes(\n serialization.Encoding.DER,\n serialization.PublicFormat.PKCS1)\n except Exception as e:\n self.logger.exception(e)\n raise exceptions.CryptographicFailure(\n \"An error occurred while generating the RSA key pair. \"\n \"See the server log for more information.\"\n )\n\n public_key = {\n 'value': public_bytes,\n 'format': enums.KeyFormatType.PKCS_1,\n 'public_exponent': public_exponent\n }\n private_key = {\n 'value': private_bytes,\n 'format': enums.KeyFormatType.PKCS_8,\n 'public_exponent': public_exponent\n }\n\n return public_key, private_key", "def GetPublicKey(self):\n return self.public_key", "def public_key(self): # pragma: no cover\n raise NotImplementedError()", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def test_get_public_key(self) -> None:\n\n expected = self.pem_public_key\n\n encryptor = DataEncryption()\n encryptor.set_public_key(self.pem_public_key.decode())\n\n actual = encryptor.get_public_key()\n\n self.assertEqual(expected, actual)", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {\n 'privateExponent': util.Base64WSDecode(rsa['privateExponent']),\n 'primeP': util.Base64WSDecode(rsa['primeP']),\n 'primeQ': util.Base64WSDecode(rsa['primeQ']),\n 'primeExponentP': util.Base64WSDecode(rsa['primeExponentP']),\n 'primeExponentQ': util.Base64WSDecode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Base64WSDecode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])" ]
[ "0.70528996", "0.682243", "0.6711425", "0.67085093", "0.6610189", "0.65048105", "0.6489869", "0.6488379", "0.64881927", "0.64246345", "0.6423231", "0.64138883", "0.6409382", "0.6395284", "0.63761365", "0.6347127", "0.6329381", "0.6321539", "0.62870216", "0.6245045", "0.6196625", "0.6152874", "0.61456263", "0.6134476", "0.61180043", "0.61159605", "0.6099881", "0.6069098", "0.6031535", "0.60043114", "0.5993228", "0.5953714", "0.59468365", "0.59373087", "0.5931271", "0.59269047", "0.5922097", "0.5909648", "0.5873117", "0.5858447", "0.58572525", "0.585712", "0.5844567", "0.5834613", "0.58337486", "0.58296925", "0.5816773", "0.58048713", "0.57905775", "0.5785524", "0.5765081", "0.57592076", "0.57520753", "0.5722384", "0.57196593", "0.5714867", "0.57124823", "0.5709474", "0.57048166", "0.570369", "0.5684008", "0.5679709", "0.5679566", "0.5648478", "0.5645037", "0.56431586", "0.56431586", "0.56366676", "0.56329125", "0.56263053", "0.56241155", "0.5619649", "0.55966777", "0.5594491", "0.5588748", "0.5582838", "0.5575654", "0.55746084", "0.55688834", "0.5560703", "0.5546753", "0.5530521", "0.5529934", "0.55166745", "0.54979366", "0.54919827", "0.5487673", "0.5472668", "0.5467465", "0.546661", "0.5464158", "0.5445376", "0.5439574", "0.5423269", "0.5422684", "0.5408133", "0.5399604", "0.5393962", "0.5381974", "0.5372651" ]
0.71075326
0
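The unwrap() snippet in the row above follows the asn1crypto PublicKeyInfo pattern: EC keys keep the raw ECPointBitString, while RSA and DSA keys return the structure parsed out of the BIT STRING. As a rough illustrative sketch only (the helper name unwrap_public and the PEM/DER handling are assumptions, not part of the dataset), it could be exercised with the real asn1crypto library like this:

    # Illustrative sketch (assumed helper, not taken from the dataset row above):
    # load a SubjectPublicKeyInfo and unwrap it the same way unwrap() does.
    from asn1crypto import keys, pem

    def unwrap_public(key_bytes):
        # Accept PEM or DER input; pem.unarmor() yields the DER bytes.
        if pem.detect(key_bytes):
            _, _, key_bytes = pem.unarmor(key_bytes)
        info = keys.PublicKeyInfo.load(key_bytes)
        if info.algorithm == 'ec':
            # EC public keys: the BIT STRING is the EC point itself.
            return info['public_key']
        # RSA -> RSAPublicKey, DSA -> Integer, parsed from the BIT STRING.
        return info['public_key'].parsed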
Creates a fingerprint that can be compared with a private key to see if the two form a pair. This fingerprint is not compatible with fingerprints generated by any other software.
def fingerprint(self):\n    if self._fingerprint is None:\n        self._fingerprint = _fingerprint(self.asn1, None)\n    return self._fingerprint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fingerprint(key_object, load_private_key):\n\n if isinstance(key_object, PrivateKeyInfo):\n key = key_object['private_key'].parsed\n\n if key_object.algorithm == 'rsa':\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n params = key_object['private_key_algorithm']['parameters']\n public_key = Integer(pow(\n params['g'].native,\n key_object['private_key'].parsed.native,\n params['p'].native\n ))\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n public_key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key['public_key'].native\n if public_key is None:\n # This is gross, but since the EC public key is optional,\n # and we need to load the private key and use the crypto lib\n # to get the public key, we have to import the platform-specific\n # asymmetric implementation. This is the reason a bunch of the\n # imports are module imports, so we don't get an import cycle.\n public_key_object = load_private_key(key_object).public_key\n public_key = public_key_object.asn1['public_key'].parsed.native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n if isinstance(key_object, PublicKeyInfo):\n if key_object.algorithm == 'rsa':\n key = key_object['public_key'].parsed\n\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n key = key_object['public_key'].parsed\n params = key_object['algorithm']['parameters']\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key_object['public_key'].native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n raise ValueError(pretty_message(\n '''\n key_object must be an instance of the\n asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n classes, not %s\n ''',\n type_name(key_object)\n ))", "def test_create_keypair_save_both(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path,\n private_filepath=priv_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)\n\n self.assertTrue(os.path.isfile(priv_file_path))", "def test_generate_key_pair(self):\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_key = Mock()\n mock_key.fingerprint = 'fake-fingerprint'\n mock_gpg.gen_key.return_value = mock_key\n\n mock_gpg.return_value = mock_gpg\n encryptor = self.test_init()\n fake_key = encryptor.generate_key_pair(key_type=\"RSA\", length=4096, options={\n 'name_real': 'Fake Name', 'name_email': '[email protected]', 'name_comment': 'Fake comment'})\n\n self.assertEqual(mock_gpg.gen_key_input.call_count, 1)\n self.assertEqual(fake_key, mock_key.fingerprint)", "def fingerprint(public_key):\r\n\r\n return 
hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]", "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def test_create_keypair_save_pub_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)", "def gen_key_pair():\n sk = gen_secret_key(BITCOIN.gen.n)\n pk = PublicKey.from_sk(sk)\n return sk, pk", "def get_shared_key(public, private, p):\n s = pow(public, private, p)\n s_hex = hex(s)[2:]\n # Make the length of s_hex a multiple of 2\n if len(s_hex) % 2 != 0:\n s_hex = '0' + s_hex\n # Convert hex to bytes\n s_bytes = binascii.unhexlify(s_hex)\n # Hash and return the hex result\n return sha256(s_bytes).digest()", "def get_public_key_fingerprint(curve: object, temp_public_key: object) \\\n -> object:\n\n vk = VerifyingKey.from_string(bytes.fromhex(temp_public_key), curve=curve)\n\n uncompressed_pub_key = vk.to_string('uncompressed')\n\n pub_key_hash_fingerprint = hashlib.sha256(uncompressed_pub_key)\n\n return pub_key_hash_fingerprint.hexdigest()", "def check_equal_rsa_priv_key(sk2_priv, sk_priv):\n pri_n = sk_priv.private_numbers()\n pri_n2 = sk2_priv.private_numbers()\n\n # the library guarantees this: p is the larger factor\n self.assertTrue(pri_n.p > pri_n.q)\n\n self.assertTrue(\n pri_n2.p == pri_n.p and\n pri_n2.q == pri_n.q and\n pri_n2.d == pri_n.d and\n pri_n2.dmp1 == pri_n.dmp1 and\n pri_n2.dmq1 == pri_n.dmq1 and\n pri_n2.iqmp == pri_n.iqmp)", "def private_key_to_public_key(private_key):\n\tpk = PrivateKey().fromString(bytes.fromhex(private_key))\n\treturn '04' + pk.publicKey().toString().hex().upper()", "def create_key_pair(self) -> Keypair:\n res = self.context.post(\n \"/dsum/create_key_pair\", None, None, \"DSum: failed creating a Curve 25519 Keypair\")\n return Keypair(res['private_key_id'], res['public_key_id'])", "def create_rsa_key_pair() -> Tuple[str, str]:\n key = RSA.generate(RSA_KEY_STRENGTH)\n public_key = key.publickey().export_key().decode()\n private_key = key.export_key().decode()\n return public_key, private_key", "def public_from_private(self, private_key):", "def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)", "def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return 
fingerprint_public_key_blob(blob)", "def swap_fingerprint(xpub, fingerprint):\n raw = decode_base58(xpub)\n swapped = raw[:5] + fingerprint + raw[9:]\n return encode_base58_checksum(swapped)", "def fingerprint(self, fingerprint_hash=None):\n try:\n fd, name = tempfile.mkstemp(prefix='sshkey-')\n with open(name, 'w') as fd:\n fd.write('{}'.format(self.line))\n if fingerprint_hash:\n p = Popen(('ssh-keygen', '-E', fingerprint_hash, '-lf', name), stdin=PIPE, stdout=PIPE, stderr=PIPE)\n else:\n p = Popen(('ssh-keygen', '-lf', name), stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = [str(v, 'utf-8') for v in p.communicate()]\n if p.returncode != 0:\n raise SSHKeyError('Error running ssh-keygen: returns {}'.format(p.returncode))\n os.unlink(name)\n return stdout.rstrip().split()[1].split(':', 1)[1]\n except Exception as e:\n raise SSHKeyError('Error getting fingerprint for {}: {}'.format(self.line, e))", "def check_equal_rsa_pub_key(sk2_, sk_):\n pub_n = sk_.public_numbers()\n pub_n2 = sk2_.public_numbers()\n\n self.assertEqual(pub_n2.e, pub_n.e)\n self.assertEqual(pub_n2.n, pub_n.n)", "def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key", "def gen_key(self):\n\n if not self.private_key:\n self._gen_key()\n else:\n raise CryptoError(\"Private Key already existing\")", "def fingerprint(self, algorithm):", "def mk_keyobj_from_private_key(self, privkey):\n bn = BACKEND_KP.private_key_obj._backend._ffi.NULL\n bn_ptr = BACKEND_KP.private_key_obj._backend._lib.BN_bin2bn(privkey, len(privkey), bn)\n secret_val = BACKEND_KP.private_key_obj._backend._bn_to_int(bn_ptr)\n\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256K1(), default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def _create_pkey(self, commonname, serial):\n pkey = PKey()\n pkey.generate_key(crypto.TYPE_RSA, self.key_bits)\n private = crypto.dump_privatekey(crypto.FILETYPE_PEM,\n pkey).decode()\n key_path = self._get_key_path(commonname, serial)\n if os.path.exists(key_path):\n raise FileExistsError(key_path)\n with open(key_path, 'w') as private_file:\n private_file.writelines(private)\n\n key_link = self._get_key_link(commonname)\n if os.path.exists(key_link):\n os.unlink(key_link)\n os.symlink(os.path.basename(key_path), key_link)\n\n return pkey", "def generate_symmetric_key():\n return Fernet.generate_key()", "def generate_key_pair(G):\r\n\r\n global random\r\n\r\n if random == None:\r\n random = hash_drbg.HashDRBG()\r\n\r\n if G.order == None:\r\n raise RuntimeError(\"Base point must have order.\")\r\n\r\n key_size = log(ec.leftmost_bit(G.order)) / log(2)\r\n key_size = int(ceil(key_size) / 2)\r\n private_key = 1\r\n\r\n while private_key <= 1:\r\n private_key = random(key_size) #generates a random number\r\n #with twice the required bits\r\n private_key %= G.order\r\n\r\n return (private_key, G * private_key)", "def make_final_key(prime, public, private):\n\n key = (public ** private) % prime\n return key", "def generate_ecdh_key_pair() -> tuple[X25519PrivateKey, bytes]:\n private_key = X25519PrivateKey.generate()\n 
public_key_raw = private_key.public_key().public_bytes(\n serialization.Encoding.Raw, serialization.PublicFormat.Raw\n )\n return private_key, public_key_raw", "def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")", "def __init__(self, private_key):\n if private_key:\n if isinstance(private_key, str): # base58 encoded string\n self.private_key = PrivateKey.from_b58check(private_key)\n else:\n self.private_key = private_key\n self.public_key = self.private_key.public_key\n else:\n self.private_key = None\n self.public_key = None", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return", "def testCreateSshKeyPairKeyAlreadyExists(self): #pylint: disable=invalid-name\n public_key = \"/fake/public_key\"\n private_key = \"/fake/private_key\"\n self.Patch(os.path, \"exists\", side_effect=[True, True])\n self.Patch(subprocess, \"check_call\")\n self.Patch(os, \"makedirs\", return_value=True)\n utils.CreateSshKeyPairIfNotExist(private_key, public_key)\n self.assertEqual(subprocess.check_call.call_count, 0) #pylint: disable=no-member", "def generate_keypair(bits):\n p = generate_prime(bits // 2)\n #print(p)\n q = generate_prime(bits // 2)\n #print(q)\n n = p * q\n return PrivateKey(p, q, n), PublicKey(n)", "def generate_key(seed):\n private_key = sha256(seed)\n public_key = privtopub(private_key)\n return {\"private\": private_key, \"public\": public_key}", "def test_create_key():\n\n assert symmetric.create_key() != \"\"", "def test_create_keypair_from_file(self):\n keys = RSA.generate(1024)\n nova_utils.save_keys_to_files(keys=keys, pub_file_path=pub_file_path)\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)", "def genKey(self, privateKey,otherKey):\n\t\tself.sharedSecret = self.genSecret(privateKey, otherKey)\n\n\t\t# Convert the shared secret (int) to an array of bytes in network order\n\t\t# Otherwise hashlib can't hash it.\n\t\ttry:\n\t\t\t_sharedSecretBytes = self.sharedSecret.to_bytes(\n\t\t\t\tself.sharedSecret.bit_length() // 8 + 1, byteorder=\"big\")\n\t\texcept AttributeError:\n\t\t\t_sharedSecretBytes = str(self.sharedSecret)\n\n\t\ts = 
hashlib.sha256()\n\t\ts.update(bytes(_sharedSecretBytes))\n\t\tself.key = s.digest()", "def generateKeys(bits=256):\n #print \"generating first prime number\"\n p = generatePrime(bits/2)\n #print \"generating second prime number\"\n q = generatePrime(bits/2)\n \n assert p != q\n #print p, \"\\n\", q\n assert gcd(p*q, (p-1)*(q-1)) == 1\n \n priv = PrivateKey(p, q)\n pub = PublicKey(p, q)\n \n priv.saveToFile()\n pub.saveToFile()\n \n return priv, pub", "def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' % self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair", "def test_private_key_pkey(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n key = crypto.load_privatekey(PEM, priv)\n self.assertEqual(utils.private_key_type(key), c.KEY_RSA)", "def gen_private_key():\n return DH.b2i(Random.new().read(DH_SIZE))", "def private_key(self):", "def fingerprint(self, key):\n base64_pub = self.base64_pub_encode(key)\n return SHA256.new(base64_pub.encode('utf-8')).digest()", "def create_privatekey():\n \n # Generate the private key\n key_jwk = wallet.create_JWK()\n response_jwk = key_jwk.export(private_key=True, as_dict=True)\n\n return response_jwk", "def secret_to_key(secret, s2k_specifier):\r\n c = ord(s2k_specifier[8])\r\n EXPBIAS = 6\r\n count = (16+(c&15)) << ((c>>4) + EXPBIAS)\r\n\r\n d = sha1()\r\n tmp = s2k_specifier[:8]+secret\r\n slen = len(tmp)\r\n while count:\r\n if count > slen:\r\n d.update(tmp)\r\n count -= slen\r\n else:\r\n d.update(tmp[:count])\r\n count = 0\r\n return d.digest()", "def createKeyPair(type, bits):\n pkey = crypto.PKey()\n pkey.generate_key(type, bits)\n return pkey", "def testCreateSshKeyPairKeyAreCreated(self):\n public_key = \"/fake/public_key\"\n private_key = 
\"/fake/private_key\"\n self.Patch(os.path, \"exists\", return_value=False)\n self.Patch(os, \"makedirs\", return_value=True)\n self.Patch(subprocess, \"check_call\")\n self.Patch(os, \"rename\")\n utils.CreateSshKeyPairIfNotExist(private_key, public_key)\n self.assertEqual(subprocess.check_call.call_count, 1) #pylint: disable=no-member\n subprocess.check_call.assert_called_with( #pylint: disable=no-member\n utils.SSH_KEYGEN_CMD +\n [\"-C\", getpass.getuser(), \"-f\", private_key],\n stdout=mock.ANY,\n stderr=mock.ANY)", "def ex_create_keypair(self, name):\n params = {\n 'Action': 'CreateKeyPair',\n 'KeyName': name,\n }\n response = self.connection.request(self.path, params=params).object\n key_material = self._findtext(response, 'keyMaterial')\n key_fingerprint = self._findtext(response, 'keyFingerprint')\n return {\n 'keyMaterial': key_material,\n 'keyFingerprint': key_fingerprint,\n }", "def new_address(\n self,\n key_pair: Tuple[bytes, bytes],\n unique_factor: Union[Tuple[int, int], bytes],\n ) -> Tuple[Tuple[bytes, bytes], Optional[bytes], bytes, bytes]:", "def create_key ():", "def fingerprint_from_keybase(fingerprint, kb_obj):\n if 'public_keys' in kb_obj and \\\n 'pgp_public_keys' in kb_obj['public_keys']:\n for key in kb_obj['public_keys']['pgp_public_keys']:\n keyprint = fingerprint_from_var(key).lower()\n fingerprint = fingerprint.lower()\n if fingerprint == keyprint or \\\n keyprint.startswith(fingerprint) or \\\n keyprint.endswith(fingerprint):\n return {\n 'fingerprint': keyprint,\n 'bundle': key\n }\n\n return None", "def create_handshake_key_pair(cls) -> Tuple[bytes, bytes]:\n ...", "def test_rsa_key(self):\n key1 = generate_private_key(u'rsa')\n self.assertIsInstance(key1,rsa.RSAPrivateKey)\n key2 = generate_private_key(u'rsa')\n self.assertIsInstance(key2, rsa.RSAPrivateKey)\n self.assertNotEqual(\n key1.public_key().public_numbers(),\n key2.public_key().public_numbers()\n )", "def test_private_public():\n\n alice_priv = ECScalar(\n bytes.fromhex(\"77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a\")\n )\n alice_public = ECPoint(\n bytes.fromhex(\"8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a\")\n )\n\n assert x25519_scalarmult_base(alice_priv) == alice_public\n\n bob_priv = ECScalar(\n bytes.fromhex(\"5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb\")\n )\n bob_public = ECPoint(\n bytes.fromhex(\"de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f\")\n )\n\n assert x25519_scalarmult_base(bob_priv) == bob_public\n\n k = ECPoint(bytes.fromhex(\"4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742\"))\n\n alice_k = x25519_scalarmult(alice_priv, bob_public)\n bob_k = x25519_scalarmult(bob_priv, alice_public)\n\n assert alice_k == bob_k\n assert alice_k == k", "def insert_hash(self, title, artist, song_id:int, fingerprint: str, offset:int):\n fingerprint = Fingerprints(song_id=song_id, song_title=title, artist=artist, hash=fingerprint, offset=offset)\n fingerprint.save()", "def _create_rsa_key_pair(self, length, public_exponent=65537):\n self.logger.info(\n \"Generating an RSA key pair with length: {0}, and \"\n \"public_exponent: {1}\".format(\n length, public_exponent\n )\n )\n try:\n private_key = rsa.generate_private_key(\n public_exponent=public_exponent,\n key_size=length,\n backend=default_backend())\n public_key = private_key.public_key()\n\n private_bytes = private_key.private_bytes(\n serialization.Encoding.DER,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption())\n 
public_bytes = public_key.public_bytes(\n serialization.Encoding.DER,\n serialization.PublicFormat.PKCS1)\n except Exception as e:\n self.logger.exception(e)\n raise exceptions.CryptographicFailure(\n \"An error occurred while generating the RSA key pair. \"\n \"See the server log for more information.\"\n )\n\n public_key = {\n 'value': public_bytes,\n 'format': enums.KeyFormatType.PKCS_1,\n 'public_exponent': public_exponent\n }\n private_key = {\n 'value': private_bytes,\n 'format': enums.KeyFormatType.PKCS_8,\n 'public_exponent': public_exponent\n }\n\n return public_key, private_key", "def generateFingerprint(molecule, numInts=32, pathLength=7):\n \n paths = LinearPaths.generatePaths(molecule, maxdepth=pathLength)\n fp = Fingerprint.Fingerprint(numIntegers=numInts)\n\n for path in paths:\n fp.addPath(path)\n\n return fp", "def generate_rsa_key_pair(self):\n\t\tprint \"Started rsa key generation\"\n\t\tkey = RSA.generate(self.key_size, randfunc=self.random_number_generator)\n\t\t\t\n\t\tpub_key = key.publickey().exportKey()\n\t\tprint pub_key\n\t\t\n\n\t\tpriv_key = key.exportKey()\n\t\tprint \"Private key\", priv_key \n\t\tprint \"Note: Normally, the private key should be protected. For the purposes of this demo, I'm printing it to terminal.\"", "def genSecret(self, privateKey, otherKey):\n if(self.checkPublicKey(otherKey) is True):\n sharedSecret = pow(otherKey, privateKey, self.prime)\n return sharedSecret\n else:\n raise Exception(\"Invalid public key.\")", "def get_new_key() -> rsa.RSAPrivateKeyWithSerialization:\n\n return rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048\n )", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def find_private_key(self):\n\t\tp, q = self.find_hidden_primes()\n\t\tself.private_key = self.calculate_private_key(p, q)\n\t\treturn self.private_key", "def gen_public_key(g, private, p):\n return pow(g, private, p)", "def sign(self, message, private_key):\n sk = private_key\n vk = sk.get_verifying_key()\n\n self.public_key = vk\n\n # This would be the Ed25519ph version (JavaScript ES7):\n # const message = crypto.createHash('sha512')\n # .update(Buffer.concat([this.messagePrefix, this.message]))\n # .digest()\n\n self.signature = sk.sign(message, encoding='bytes')", "def alt_stubbed_receiver() -> PublicKey:\n return PublicKey(\"J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i98\")", "def fingerprint_public_key_blob(blob):\n hash = sha256(blob).digest()\n encoded = b64encode(hash).decode('UTF-8').rstrip('=')\n return 'SHA256:{}'.format(encoded)", "def mk_keyobj_from_private_key_der(self, derdat):\n self.private_key_obj = serialization.load_der_private_key(derdat, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def get_fingerprint():\n for fingerimg in range(1, 3):\n if fingerimg == 1:\n print(\"Place finger on sensor...\", end=\"\", flush=True)\n else:\n print(\"Place same finger again...\", end=\"\", flush=True)\n \n cont = 0\n\n while True:\n i = finger.get_image()\n if i == adafruit_fingerprint.OK:\n print(\"Image taken\")\n break\n if i == adafruit_fingerprint.NOFINGER:\n print(\".\", end=\"\", flush=True)\n time.sleep(0.01)\n cont += 1\n if cont >= 100:\n print(\"Tiempo de espera agotado\")\n return {'success': False, 'mismatch': False, 'msg': \"No se ha detectado la huella, intente nuevamente\"}\n elif i == 
adafruit_fingerprint.IMAGEFAIL:\n print(\"Imaging error\")\n return False\n else:\n print(\"Other error\")\n return {'success': False, 'mismatch': False, 'msg': \"Fallo al tomar la huella, intente nuevamente\"}\n\n print(\"Templating...\", end=\"\", flush=True)\n i = finger.image_2_tz(fingerimg)\n if i == adafruit_fingerprint.OK:\n print(\"Templated\")\n else:\n if i == adafruit_fingerprint.IMAGEMESS:\n print(\"Image too messy\")\n elif i == adafruit_fingerprint.FEATUREFAIL:\n print(\"Could not identify features\")\n elif i == adafruit_fingerprint.INVALIDIMAGE:\n print(\"Image invalid\")\n else:\n print(\"Other error\")\n return {'success': False, 'mismatch': False, 'msg': \"Fallo al tomar la huella, intente nuevamente\"}\n\n if fingerimg == 1:\n print(\"Remove finger\")\n while i != adafruit_fingerprint.NOFINGER:\n i = finger.get_image()\n\n print(\"Creating model...\", end=\"\", flush=True)\n i = finger.create_model()\n if i == adafruit_fingerprint.OK:\n print(\"Created\")\n else:\n if i == adafruit_fingerprint.ENROLLMISMATCH:\n print(\"Prints did not match\")\n return {'success': False, 'mismatch': True, 'msg': \"Las huellas ingresadas no coinciden, intente nuevamente\"}\n else:\n print(\"Other error\")\n return {'success': False, 'mismatch': False, 'msg': \"Fallo al tomar la huella, intente nuevamente\"}\n\n print(\"Downloading template...\")\n data = finger.get_fpdata(\"char\", 1)\n filename = f\"{uuid.uuid4()}.dat\"\n with open(f\"./fingers/{filename}\", \"wb\") as file:\n file.write(bytearray(data))\n print(f\"Template is saved in {filename} file.\")\n \n return {'success': True, 'msg': \"Imagen tomada satisfactoriamente\", 'data': filename}", "def create_pair(self, players_list: list[Player], id_number, already_paired=[]) -> tuple:\n for player_1, player_2 in zip(repeat(players_list[id_number]), players_list[1:]):\n tuple = (player_1, player_2)\n pair = self.check_faced_players(tuple)\n if pair is None:\n pass\n else:\n if pair[0] in already_paired:\n pass\n elif pair[1] in already_paired:\n pass\n elif pair[0] == pair[1]:\n pass\n else:\n return pair", "def generate_key():\n key = crypto.Key.generate_key()\n click.echo('Private Key (len {}):: \\n{}'.format(\n len(key.get_privkey()),\n hexlify(key.get_privkey())))\n click.echo('Public Key (len {})::\\n{}'.format(\n len(key.get_pubkey()),\n hexlify(key.get_pubkey())))", "def test_rendezvous_hash_roughly_fractional_change():\n first_choices = range(10)\n second_choices = range(9)\n\n test_keys = [str(x) for x in range(10000)]\n\n first_results = [\n marathon_tools.rendezvous_hash(first_choices, k) for k in test_keys\n ]\n second_results = [\n marathon_tools.rendezvous_hash(second_choices, k) for k in test_keys\n ]\n\n num_same = len([1 for x, y in zip(first_results, second_results) if x == y])\n assert num_same > 8900\n assert num_same < 9100", "def create_key(\n self,\n path: Union[bytes, str],\n type_: Optional[Union[bytes, str]] = None, # TODO enum\n policy_path: Optional[Union[bytes, str]] = None,\n auth_value: Optional[Union[bytes, str]] = None,\n exists_ok: bool = False,\n ) -> bool:\n path = _to_bytes_or_null(path)\n type_ = _to_bytes_or_null(type_)\n policy_path = _to_bytes_or_null(policy_path)\n auth_value = _to_bytes_or_null(auth_value)\n ret = lib.Fapi_CreateKey(self._ctx, path, type_, policy_path, auth_value)\n _chkrc(\n ret, acceptable=lib.TSS2_FAPI_RC_PATH_ALREADY_EXISTS if exists_ok else None\n )\n return ret == lib.TPM2_RC_SUCCESS", "def new_private_key(self):\n option = 'new_private_key'\n _file = 
self.__get_option(option)\n\n if _file and not os.path.exists(_file) and not os.path.isfile(_file):\n self.log.error(\"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n raise ConfigError('File Error', \"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n else:\n return None", "def test_jfpv1_sha256_structural_distinction_2(self):\n obj_in_1 = [\n [1, [\"x\", \"x\"]],\n [2, [\"y\", \"y\"]],\n ]\n fp_1 = create(input=json.dumps(obj_in_1), hash_function=hash_functions.SHA256, version=1)\n\n obj_in_2 = [\n [1, [\"x\", \"y\"]],\n [2, [\"x\", \"y\"]],\n ]\n fp_2 = create(input=json.dumps(obj_in_2), hash_function=hash_functions.SHA256, version=1)\n\n self.assertNotEqual(fp_1, fp_2)", "def __init__(self):\n self._keypair = RSA.generate(2048)\n self.public_key = self._keypair.publickey().exportKey()", "def do_new(argv):\n\n global PRIVATE_KEY\n\n if not PRIVATE_KEY:\n PRIVATE_KEY = wallet.get_private_key()\n else:\n get_new = yes_or_no(\"Private key already exist, do you want generate new one ?\")\n if get_new:\n PRIVATE_KEY = wallet.get_private_key()\n print(\"Private Key: '\" + PRIVATE_KEY + \"'\")\n cmpr_pub_key = wallet.get_compressed_public_key(PRIVATE_KEY, 1)\n addr = wallet.public_key_to_address(cmpr_pub_key, 0)\n open(\"data/address\", \"w\").write(addr)\n print(\"Public key was saved to 'data/cmpr_pub_key'\")", "def _createPrivateKey(key):\r\n if not isinstance(key, RSAKey):\r\n raise AssertionError()\r\n if not key.hasPrivateKey():\r\n raise AssertionError()\r\n return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,\r\n key.dQ, key.qInv)", "def signSignWithKey(self, secondPrivateKey):\r\n self._secondPrivateKey = secondPrivateKey\r\n self.signSign()", "def _create_external_keypair():\n\n if not utils.use_external_resource(ctx.node.properties):\n return False\n\n key_pair_name = ctx.node.properties['resource_id']\n key_pair_in_account = _get_key_pair_by_id(key_pair_name)\n key_path_in_filesystem = _get_path_to_key_file()\n ctx.logger.debug(\n 'Path to key file: {0}.'.format(key_path_in_filesystem))\n if not key_pair_in_account:\n raise NonRecoverableError(\n 'External resource, but the key pair is not in the account.')\n if not _search_for_key_file(key_path_in_filesystem):\n raise NonRecoverableError(\n 'External resource, but the key file does not exist.')\n utils.set_external_resource_id(key_pair_name, ctx.instance)\n return True", "def fingerprint(keyed_data, digest_size=16):\n h = blake2b(digest_size=16)\n for key in sorted(keyed_data.keys()):\n val = keyed_data[key]\n s = json.dumps(val, sort_keys=True, cls=NpEncoder).encode()\n h.update(s)\n return h.hexdigest()", "def shared_key(private_key,public_key):\n\treturn private_key.exchange(public_key)", "def _generateSSHKey(self, private_filepath, public_filepath):\n self.log.debug(\"Writing SSH keys to: \" + private_filepath + \" and \" + public_filepath)\n\n (ssh_dir, filename) = os.path.split(os.path.expanduser(private_filepath))\n if not os.path.exists(ssh_dir):\n self.log.debug(\"SSH Directory doesn't exist, creating \" + ssh_dir)\n os.makedirs(ssh_dir)\n\n key = paramiko.RSAKey.generate(1024)\n key.write_private_key_file(os.path.expanduser(private_filepath))\n \n with open(os.path.expanduser(public_filepath),\"w\") as public:\n public.write(\"%s %s\" % (key.get_name(), key.get_base64()))\n\n public.close()", "def genSecret(self, privateKey, otherKey):\n\n\t\tsharedSecret = pow(otherKey, privateKey, self.prime_p)\n\t\treturn sharedSecret", "def genKey(self, 
otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def generate_rsa_auxiliary_key_pair() -> AuxiliaryKeyPair:\n rsa_key_pair = rsa_keypair()\n return AuxiliaryKeyPair(rsa_key_pair.private_key, rsa_key_pair.public_key)", "def generate_private_key():\n\treturn binascii.hexlify(os.urandom(32)).decode('utf-8').upper()", "def fingerprint(publicKeyN, publicKeyE=65537L):\n asn1Str = encoder.encode(univ.Sequence().setComponentByPosition(0, univ.Integer(publicKeyN)).setComponentByPosition(1, univ.Integer(publicKeyE)))\n hashString = hashlib.sha1(asn1Str).digest()\n hexlifiedHash = binascii.hexlify(hashString)\n return hexlifiedHash.upper()", "def gen_private_key(p, q, e):\n\n # Calculate 'n', n = p x q\n n = p * q\n # Calculate 'd', d = e^(-1) mod [(p-1)x(q-1)]\n phi = (p - 1) * (q - 1)\n # Need to use extended euclidean algorithm for 'd'\n gcd, d, b = egcd(e, phi)\n\n # Assign key parameters\n key_params = (n, e, d, p, q)\n # Construct private key\n key = RSA.construct(key_params)\n\n return key.exportKey()", "def generate_key_image(\n self, output: OutputInfo, private_view_key: bytes, private_spend_key: bytes\n ) -> bytes:", "def keygen(cls, bits, num_shares, threshold):\n if threshold < 2:\n raise('Threshold should be at least 2, but is {}'.format(threshold))\n primes = PrimeStorage()\n ((p, p_), (q, q_)) = primes.getRandomSafePrimes(bits // 2)\n\n n = p * q\n m = p_ * q_\n\n # find secret\n d = ext_euclid(n, m)\n\n pk = PublicPaillierKey(n)\n\n # Shamir secret sharing: determine polynomial\n coeffs = [d] + [randint(0, n*m) for _ in range(threshold-1)]\n # determine shares\n shares = [eval_polynomial(coeffs, i, n*m)\n for i in range(1, num_shares + 1)]\n key_shares = [PrivateKeyShare(\n shares[i-1], i, len(shares), threshold, pk) for i in range(1, num_shares + 1)]\n\n # - v, a generator of Z^*_(n^2)\n # - verification key for each decryption party\n\n return pk, key_shares", "def generate_rsa_public_key(private_key: RSAPrivateKeyWithSerialization) -> RSAPublicKey:\n return private_key.public_key()", "def requires_pairing(cls) -> bool:\n return False", "def _get_private_key(self, privkey=None):\n\n # read private keys from keyring\n privkeys = self.gpg.list_keys(True) # True => private keys\n if len(privkeys) > 0 and privkeys[-1].has_key('fingerprint'):\n fingerprints = []\n for k in privkeys:\n fingerprints.append(k['fingerprint'])\n else:\n # no private key in keyring\n return None\n\n if privkey:\n # check for existence of private key received as argument\n # DEVEL: check for expiration as well\n if len(privkey) > 7 and len(privkey) <= 40:\n for fp in fingerprints:\n if fp.endswith(privkey):\n # work with last 16 significant chars internally,\n # even if only 8 are required in trac.ini\n privkey = fp[-16:]\n break\n # no fingerprint matching key ID\n else:\n privkey = None\n else:\n # reset invalid key ID\n privkey = None\n else:\n # select (last) private key from keyring\n privkey = fingerprints[-1][-16:]\n\n return privkey", "def o_priv_to_pub(priv):\n pub = base10_multiply(G, priv)\n return '0' + str(2 + (pub[1] % 2)) + encode(pub[0], 16, 64)", "def stubbed_receiver() -> PublicKey:\n return PublicKey(\"J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i99\")", "def from_data(cls, fingerprint: bytes) -> Address:\n if len(fingerprint) == 65:\n fingerprint = fingerprint[1:]\n assert len(fingerprint) == 64, 'key data length 
error: %d' % len(fingerprint)\n # 1. digest = keccak256(fingerprint)\n digest = keccak256(data=fingerprint)\n # 2. address = hex_encode(digest.suffix(20))\n tail = digest[-20:]\n address = '0x' + eip55(address=hex_encode(data=tail))\n return cls(address=address)", "def has_gpg_key(fingerprint):\n if len(fingerprint) > 8:\n fingerprint = fingerprint[-8:]\n\n fingerprint = fingerprint.upper()\n cmd = flatten([gnupg_bin(), gnupg_home(), \"--list-public-keys\"])\n lines = stderr_output(cmd).split('\\n')\n return len([key for key in lines if key.find(fingerprint) > -1]) == 1", "def create_key_pair(self, key_name):\n response = key_pair.create_key_pair(self.url, self.verb, self.headers,\n self.version, key_name)\n if response is not None :\n res = CreateKeyPairResponse.CreateKeyPairResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)", "def get_key_pair() -> typing.Tuple[bytes, bytes]: \n return _get_key_pair_from_sk(ecdsa.SigningKey.generate(curve=CURVE))" ]
[ "0.6485189", "0.6272822", "0.6004521", "0.5935738", "0.5886129", "0.58848107", "0.5814873", "0.57364976", "0.57348734", "0.5725082", "0.5690334", "0.56683075", "0.56541723", "0.5592357", "0.5590496", "0.5577048", "0.55707747", "0.5545947", "0.5468767", "0.54511124", "0.5442331", "0.54422545", "0.54422146", "0.541472", "0.5396742", "0.53871787", "0.5386136", "0.53694016", "0.5347691", "0.53441554", "0.53421646", "0.53314656", "0.53168756", "0.5298292", "0.52555823", "0.5251397", "0.52472556", "0.5238893", "0.522475", "0.5223917", "0.5223752", "0.5214303", "0.52114534", "0.5204219", "0.51959795", "0.5174292", "0.51739055", "0.5162017", "0.51453656", "0.51448226", "0.5142992", "0.5140465", "0.513971", "0.51155156", "0.5112581", "0.510144", "0.5081666", "0.5072298", "0.5068935", "0.50581586", "0.5056931", "0.502904", "0.50285476", "0.5027212", "0.5025062", "0.4991371", "0.49906155", "0.49838084", "0.49748272", "0.49727443", "0.49561626", "0.49560666", "0.49544814", "0.49505305", "0.49489194", "0.49477616", "0.49462673", "0.49435118", "0.49301803", "0.4927683", "0.49192873", "0.49093017", "0.49030036", "0.49022427", "0.48979405", "0.48973656", "0.48966375", "0.48841828", "0.48782557", "0.48727304", "0.4871851", "0.48682994", "0.48641017", "0.485458", "0.4851213", "0.48410913", "0.48409212", "0.4828758", "0.4821775", "0.4817852", "0.4814556" ]
0.0
-1
Unwraps an asn1crypto.keys.PrivateKeyInfo object into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey.
def _unwrap_private_key_info(key_info):
    key_alg = key_info.algorithm

    if key_alg == 'rsa' or key_alg == 'rsassa_pss':
        return key_info['private_key'].parsed

    if key_alg == 'dsa':
        params = key_info['private_key_algorithm']['parameters']
        parsed = key_info['private_key'].parsed
        return DSAPrivateKey({
            'version': 0,
            'p': params['p'],
            'q': params['q'],
            'g': params['g'],
            'public_key': Integer(pow(
                params['g'].native,
                parsed.native,
                params['p'].native
            )),
            'private_key': parsed,
        })

    if key_alg == 'ec':
        parsed = key_info['private_key'].parsed
        parsed['parameters'] = key_info['private_key_algorithm']['parameters']
        return parsed

    raise ValueError('Unsupported key_info.algorithm "%s"' % key_info.algorithm)
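A minimal usage sketch for the function above (added for illustration, not part of the dataset record): it assumes asn1crypto is available and that key.der is a hypothetical PKCS#8 DER-encoded RSA private key file.

    from asn1crypto.keys import PrivateKeyInfo

    # Load a PKCS#8 private key (hypothetical file name) and unwrap it.
    with open('key.der', 'rb') as f:
        key_info = PrivateKeyInfo.load(f.read())

    unwrapped = _unwrap_private_key_info(key_info)
    # For an RSA key the result is an asn1crypto.keys.RSAPrivateKey, so e.g.:
    print(unwrapped['modulus'].native.bit_length())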
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data, password)\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a public key\n '''\n ))\n\n if key_type == 'certificate':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a certificate\n '''\n ))\n\n try:\n pki = PrivateKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PrivateKeyInfo\n\n try:\n parsed_wrapper = EncryptedPrivateKeyInfo.load(data)\n encryption_algorithm_info = parsed_wrapper['encryption_algorithm']\n encrypted_data = parsed_wrapper['encrypted_data'].native\n decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)\n pki = PrivateKeyInfo.load(decrypted_data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not EncryptedPrivateKeyInfo\n\n try:\n parsed = RSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPrivateKey\n\n try:\n parsed = DSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'dsa')\n except (ValueError):\n pass # Data was not a DSAPrivateKey\n\n try:\n parsed = ECPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'ec')\n except (ValueError):\n pass # Data was not an ECPrivateKey\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known private key format\n '''\n ))", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())", "def _unarmor_pem(data, password=None):\n\n object_type, headers, der_bytes = unarmor(data)\n\n type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n armor_type = re.match(type_regex, object_type)\n if not armor_type:\n raise ValueError(pretty_message(\n '''\n data does not seem to 
contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n pem_header = armor_type.group(1)\n\n data = data.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n algo = armor_type.group(2).lower()\n return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))\n\n key_type = pem_header.lower()\n algo = None\n if key_type == 'encrypted private key':\n key_type = 'private key'\n elif key_type == 'rsa public key':\n key_type = 'public key'\n algo = 'rsa'\n\n return (key_type, algo, der_bytes)", "def _wrap_privatekey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PrivateKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize private key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_private_key(der, password=None,\n backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def _createPrivateKey(key):\r\n if not isinstance(key, RSAKey):\r\n raise AssertionError()\r\n if not key.hasPrivateKey():\r\n raise AssertionError()\r\n return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,\r\n key.dQ, key.qInv)", "def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid private key encoding\")\n\n return decoded_key[\"privateKey\"]", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def convert_key_info_to_readable(key_info: dict[str, Any]) -> dict[str, Any]:\n key_fields = {'kid': 'key_id',\n 'kty': 'json_web_key_type',\n 'key_ops': 'key_operations',\n 'n': 'RSA_modulus',\n 'e': 'RSA_public_components',\n }\n for key, value in key_fields.items():\n if key in key_info:\n key_info[value] = key_info.pop(key)\n\n return key_info", "def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def rsa_decrypt(self, thing):\n return self.true_private_key.decrypt(\n thing,\n cryptography.hazmat.primitives.asymmetric.padding.OAEP(\n mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(\n algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def mk_keyobj_from_private_key(self, privkey):\n bn = BACKEND_KP.private_key_obj._backend._ffi.NULL\n bn_ptr = BACKEND_KP.private_key_obj._backend._lib.BN_bin2bn(privkey, len(privkey), bn)\n secret_val = BACKEND_KP.private_key_obj._backend._bn_to_int(bn_ptr)\n\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256K1(), default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def decrypt(s):\n if s is None:\n return None\n else:\n # try:\n enc_value = ast.literal_eval(s)\n private_key = serialization.load_pem_private_key(\n pkey.encode('utf-8'),\n password=None,\n backend=default_backend()\n )\n\n dec = private_key.decrypt(\n enc_value,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return dec.decode()", "def _get_private_key(self, privkey=None):\n\n # read private keys from keyring\n privkeys = self.gpg.list_keys(True) # True => private keys\n if len(privkeys) > 0 and privkeys[-1].has_key('fingerprint'):\n fingerprints = []\n for k in privkeys:\n fingerprints.append(k['fingerprint'])\n else:\n # no private key in keyring\n return None\n\n if privkey:\n # check for existence of private key received as argument\n # DEVEL: check for expiration as well\n if len(privkey) > 7 and len(privkey) <= 40:\n for fp in fingerprints:\n if fp.endswith(privkey):\n # work with last 16 significant chars internally,\n # even if only 8 are required in trac.ini\n privkey = fp[-16:]\n break\n # no fingerprint matching key ID\n else:\n privkey = None\n else:\n # reset invalid key ID\n privkey = None\n else:\n # select (last) private key from keyring\n privkey = fingerprints[-1][-16:]\n\n return privkey", "def get_key_from_keyring(self):\n private_key = keyring.get_password(self.keyring_service_name, \"private_key\")\n\n if private_key is not None:\n return base64.b64decode(private_key)\n else:\n return None", "def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def _unarmor_pem_openssl_private(headers, data, password):\n\n enc_algo = None\n enc_iv_hex = None\n enc_iv = None\n\n if 'DEK-Info' in headers:\n params = headers['DEK-Info']\n if params.find(',') != -1:\n enc_algo, enc_iv_hex = params.strip().split(',')\n else:\n enc_algo = 'RC4'\n\n if not enc_algo:\n return 
data\n\n if enc_iv_hex:\n enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii'))\n enc_algo = enc_algo.lower()\n\n enc_key_length = {\n 'aes-128-cbc': 16,\n 'aes-128': 16,\n 'aes-192-cbc': 24,\n 'aes-192': 24,\n 'aes-256-cbc': 32,\n 'aes-256': 32,\n 'rc4': 16,\n 'rc4-64': 8,\n 'rc4-40': 5,\n 'rc2-64-cbc': 8,\n 'rc2-40-cbc': 5,\n 'rc2-cbc': 16,\n 'rc2': 16,\n 'des-ede3-cbc': 24,\n 'des-ede3': 24,\n 'des3': 24,\n 'des-ede-cbc': 16,\n 'des-cbc': 8,\n 'des': 8,\n }[enc_algo]\n\n enc_key = hashlib.md5(password + enc_iv[0:8]).digest()\n while enc_key_length > len(enc_key):\n enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest()\n enc_key = enc_key[0:enc_key_length]\n\n enc_algo_name = {\n 'aes-128-cbc': 'aes',\n 'aes-128': 'aes',\n 'aes-192-cbc': 'aes',\n 'aes-192': 'aes',\n 'aes-256-cbc': 'aes',\n 'aes-256': 'aes',\n 'rc4': 'rc4',\n 'rc4-64': 'rc4',\n 'rc4-40': 'rc4',\n 'rc2-64-cbc': 'rc2',\n 'rc2-40-cbc': 'rc2',\n 'rc2-cbc': 'rc2',\n 'rc2': 'rc2',\n 'des-ede3-cbc': 'tripledes',\n 'des-ede3': 'tripledes',\n 'des3': 'tripledes',\n 'des-ede-cbc': 'tripledes',\n 'des-cbc': 'des',\n 'des': 'des',\n }[enc_algo]\n decrypt_func = crypto_funcs[enc_algo_name]\n\n if enc_algo_name == 'rc4':\n return decrypt_func(enc_key, data)\n\n return decrypt_func(enc_key, data, enc_iv)", "def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")", "def rsa_decrypt(data, rsa_priv_key_str):\r\n key = RSA.importKey(rsa_priv_key_str)\r\n cipher = PKCS1_OAEP.new(key)\r\n return cipher.decrypt(data)", "def test_private_key_rsa(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_RSA)", "def deserializePrivateKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_private_key(string, password = None , backend = bc)", "def rsa_file_to_privatekey(filename):\r\n fileobject = file(filename,'r')\r\n privatekeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_privatekey(privatekeystring)", "def 
_serialize_private_key(private_key, password=None):\n error = None\n pvt_key_loaders = [\n load_pem_private_key, load_der_private_key\n ]\n pvt_key = None\n for loader in pvt_key_loaders:\n if not pvt_key:\n try:\n pvt_key = loader(\n private_key.encode('utf-8'),\n password=password,\n backend=default_backend()\n )\n error = False\n break\n except (ValueError, UnsupportedAlgorithm) as err:\n error = err\n if error:\n raise errors.InvalidPrivateKeyError(error)\n else:\n return pvt_key", "def rsa_private_key(ctx, key_size=\"4096\"):\n rsa_key_size = int(key_size)\n\n key = rsa.generate_private_key(public_exponent=65537, key_size=rsa_key_size, backend=default_backend())\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )", "def get_principal_pkey( user_email, observer_secret ):\n \n sp = get_principal_data( user_email )\n if sp is None:\n logger.error(\"Failed to find private key for principal %s\" % user_email )\n return None \n \n public_key_pem = sp.public_key_pem\n sealed_private_key_pem = sp.sealed_private_key\n\n # unseal\n private_key_pem = verify_and_unseal_blob(public_key_pem, observer_secret, sealed_private_key_pem)\n if private_key_pem is None:\n logger.error(\"Failed to unseal private key\")\n\n return private_key_pem", "def test_private_key_pkey(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n key = crypto.load_privatekey(PEM, priv)\n self.assertEqual(utils.private_key_type(key), c.KEY_RSA)", "def private_key(\n self,\n key: str,\n default: Any = undefined,\n description: str = None,\n key_format: Optional[EncryptionKeyFormat] = None,\n passphrase: Optional[str] = None,\n **kwargs\n ) -> Optional[PrivateKey]:\n cast_key = partial(cast_private_key, key_format=key_format, passphrase=passphrase)\n return self._process(key, description=description, default=default, cast=cast_key,type=PrivateKey, **kwargs)", "def serializePrivateKey(private_key):\n\treturn private_key.private_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PrivateFormat.PKCS8,\n\t\tencryption_algorithm=serialization.NoEncryption()\n\t)", "def private_key(self):\n if self._private_key is not None:\n return self._private_key[0]\n\n spk = self.serialized_private_key\n passphrase = self.passphrase\n\n try:\n self._private_key = [\n serialization.load_pem_private_key(\n self.serialized_private_key,\n backend=default_backend(),\n password=self.passphrase)]\n\n return self._private_key[0]\n\n except:\n raise\n self._private_key = [None]\n return self._private_key[0]", "def load_private_key(self, private_key):\n if not self.curve:\n self.curve = private_key.curve\n if self.curve != private_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.private_key = private_key\n return self.private_key.get_verifying_key()", "def validate_privatekey_pem(key_pem):\n assert isinstance(key_pem, str)\n\n private_key_cryptography = 
serialization.load_pem_private_key(\n data=key_pem.encode('ascii'),\n password=None,\n backend=cryptography_default_backend\n )\n\n if not isinstance(private_key_cryptography, rsa.RSAPrivateKey):\n sys.exit('Unexpected private key type')\n\n return private_key_cryptography", "def gen_priv_key(key_size: int = 2048) -> rsa.RSAPrivateKey:\n return rsa.generate_private_key(public_exponent=65537, key_size=key_size)", "def __decryptRSA(msg, user):\n # Load user's private key\n try:\n with open(\"%s/%s/keys/privateKey.pem\" % (USERS, user), \"rb\") as f:\n privateKey = serialization.load_pem_private_key(\n f.read(),\n password=None,\n backend=default_backend()\n )\n f.close()\n except:\n print(\"Error opening user's private key\")\n print(sys.exc_info())\n return None\n \n # Decrypt message\n return privateKey.decrypt(\n msg, \n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {'privateExponent': util.Decode(rsa['privateExponent']),\n 'primeP': util.Decode(rsa['primeP']),\n 'primeQ': util.Decode(rsa['primeQ']),\n 'primeExponentP': util.Decode(rsa['primeExponentP']),\n 'primeExponentQ': util.Decode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Decode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "def get_private_key(self, address58: str) -> 'EllipticCurvePrivateKey':\n return self.keys[address58]", "def decrypt_using_private_key(message):\n public_key_path = os.path.join('keys', 'private.key')\n with open(public_key_path, 'rb') as file:\n private_key = RSA.importKey(file.read())\n\n cipher = PKCS1_OAEP.new(private_key)\n encrypted = cipher.decrypt(message)\n return encrypted.hex()", "def _try_load_ca_private_key(path):\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, open(path, 'rb').read())\n if pkey.bits() < 2048:\n raise ValueError(\"I'm sorry Dave, I can't let you use a small \"\n \"RSA key.\")\n pkey.check()\n return pkey", "def asym_dec(self, ciph, keyfile):\n ciph = ciph.split('\\0')\n ciphkey_len = int(ciph[0])\n ciph = '\\0'.join(ciph[1:])\n ciphkey = ciph[:ciphkey_len]\n ciph = ciph[ciphkey_len:]\n\n passphrase = xsystem([self.sslname, 'rsautl', '-decrypt', '-inkey',\n keyfile], ciphkey)\n if not passphrase:\n warning('keymanagement: Unable to perform asymmetric decryption\\n')\n return None\n\n return self.sym_dec(ciph, passphrase)", "def generate_rsa_private_key(key_size: int = 2048, exponent: int = 65537) -> RSAPrivateKeyWithSerialization:\n return 
rsa.generate_private_key(backend=default_backend(), public_exponent=exponent, key_size=key_size)", "def test_get_private_key(self):\n\n expected = self.pem_private_key\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n actual = encryptor.get_private_key()\n\n self.assertEqual(expected, actual)", "def decrypt(cypher, priv_key):\n\n if not isinstance(priv_key, key.PrivateKey):\n raise TypeError(\"You must use the private key with decrypt\")\n\n return gluechops(cypher, priv_key.d, priv_key.n, decrypt_int)", "def decrypt(private_key, ciphertext):\n if len(ciphertext) < 512 + 16:\n return None\n msg_header = ciphertext[:512]\n msg_iv = ciphertext[512:512+16]\n msg_body = ciphertext[512+16:]\n try:\n symmetric_key = PKCS1_OAEP.new(private_key).decrypt(msg_header)\n except ValueError:\n return None\n if len(symmetric_key) != 32:\n return None\n return AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).decrypt(msg_body)", "def _rsa_keydict_to_keyobj(publickey = None, privatekey = None):\r\n if publickey is not None:\r\n if not rsa_is_valid_publickey(publickey):\r\n raise ValueError, \"Invalid public key\"\r\n \r\n if privatekey is not None: \r\n if not rsa_is_valid_privatekey(privatekey):\r\n raise ValueError, \"Invalid private key\"\r\n \r\n if publickey is None and privatekey is None:\r\n raise TypeError(\"Must provide either private or public key dictionary\")\r\n\r\n if publickey is None: \r\n publickey = {}\r\n if privatekey is None: \r\n privatekey = {}\r\n \r\n n = None \r\n e = None\r\n d = None\r\n p = None\r\n q = None\r\n \r\n if 'd' in privatekey: \r\n d = long(privatekey['d'])\r\n if 'p' in privatekey: \r\n p = long(privatekey['p'])\r\n if 'q' in privatekey: \r\n q = long(privatekey['q']) \r\n \r\n if 'n' in publickey: \r\n n = long(publickey['n'])\r\n # n is needed for a private key even thought it is not\r\n # part of the standard public key dictionary.\r\n else: n = p*q \r\n if 'e' in publickey: \r\n e = long(publickey['e'])\r\n \r\n rsa_implementation = RSA_RSAImplementation()\r\n rsa_key = rsa_implementation.construct((n,e,d,p,q))\r\n \r\n return rsa_key", "async def retrieve_private_key(self) -> Tuple[str, str]:\n\n filename, file_path = random.choice(self._private_keys)\n async with aiofiles.open(file_path, mode='r') as file:\n private_key = await file.read()\n return private_key, self._create_public_key_identifier(filename)", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def private_key(self):\n return PrivateKey(self._sk.private_bytes(\n encoding=serialization.Encoding.Raw,\n format=serialization.PrivateFormat.Raw,\n encryption_algorithm=serialization.NoEncryption()))", "def _decrypt_pvtkey(self, pvtkey_file: str, passphrase: str) -> str:\n\n keydata: str = None\n if pvtkey_file:\n try:\n keydata = asyncssh.public_key.read_private_key(pvtkey_file,\n passphrase)\n except Exception as e:\n self.logger.error(\n f\"ERROR: Unable to read private key file {pvtkey_file}\"\n f\"for jump host due to {str(e)}\")\n\n return keydata", "def _generate_ca_private_key(path):\n DEFAULT_KEY_ALG = crypto.TYPE_RSA\n DEFAULT_KEY_BITS = 2048\n\n pkey = crypto.PKey()\n pkey.generate_key(DEFAULT_KEY_ALG, DEFAULT_KEY_BITS)\n data = 
crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)\n open(path, 'wb').write(data)\n\n return pkey", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {\n 'privateExponent': util.Base64WSDecode(rsa['privateExponent']),\n 'primeP': util.Base64WSDecode(rsa['primeP']),\n 'primeQ': util.Base64WSDecode(rsa['primeQ']),\n 'primeExponentP': util.Base64WSDecode(rsa['primeExponentP']),\n 'primeExponentQ': util.Base64WSDecode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Base64WSDecode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "def mk_keyobj_from_private_key_der(self, derdat):\n self.private_key_obj = serialization.load_der_private_key(derdat, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def get_ca_private_key():\n return _try_load_ca_private_key(cfg.ca_private_key_path())", "def get_private_key_pem( pkey_path ):\n \n # get the OpenCloud private key \n observer_pkey = syndicate_storage.read_private_key( pkey_path )\n if observer_pkey is None:\n logger.error(\"Failed to load Observer private key\")\n return None\n \n observer_pkey_pem = observer_pkey.exportKey()\n \n return observer_pkey_pem", "def generate_rsa_auxiliary_key_pair() -> AuxiliaryKeyPair:\n rsa_key_pair = rsa_keypair()\n return AuxiliaryKeyPair(rsa_key_pair.private_key, rsa_key_pair.public_key)", "def _get_decryption_key(self, **options):\n\n return self._get_encryption_key(**options)", "def __init__(self, key_info):\n if (key_info.type != client_pb2.KeyInfo.ECDSA):\n raise error.UnsupportedAlgorithmError(\n \"Expected ECDSA key, but got key type %d\" % key_info.type)\n\n # Will raise a PemError on invalid encoding\n self.__der, _ = pem.from_pem(key_info.pem_key, self.__READ_MARKERS)\n try:\n self.__key = ecdsa.VerifyingKey.from_der(self.__der)\n except ecdsa.der.UnexpectedDER as e:\n raise error.EncodingError(e)", "def get_private_key():\n if not os.path.exists(_private_key_path):\n return None\n\n try:\n with open(_private_key_path) as secret_file:\n return secret_file.read()\n\n except Exception as exc:\n log.error(f'Could not read private key.\\n{exc}')\n traceback.print_exc(file=sys.stderr)", "def decrypt(data, private_key):\r\n\r\n # Retrieve session key, tag, ciphertext and nonce from file\r\n enc_session_key, nonce, tag, ciphertext = \\\r\n [ file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1) ]\r\n\r\n\r\n # Decrypt the session key\r\n session_key = cipher_rsa.decrypt(enc_session_key)\r\n\r\n # Decrypt the data with the AES session key\r\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\r\n data = cipher_aes.decrypt_and_verify(ciphertext, tag)\r\n\r\n return data", "def read_private_key_file(pkey_file,\n pkey_password=None,\n key_type=None,\n logger=None):\n ssh_pkey = None\n key_types = (paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey)\n if hasattr(paramiko, 'Ed25519Key'):\n # NOQA: new in paramiko>=2.2: http://docs.paramiko.org/en/stable/api/keys.html#module-paramiko.ed25519key\n key_types += (paramiko.Ed25519Key, )\n for pkey_class in (key_type,) if key_type else key_types:\n try:\n ssh_pkey 
= pkey_class.from_private_key_file(\n pkey_file,\n password=pkey_password\n )\n if logger:\n logger.debug('Private key file ({0}, {1}) successfully '\n 'loaded'.format(pkey_file, pkey_class))\n break\n except paramiko.PasswordRequiredException:\n if logger:\n logger.error('Password is required for key {0}'\n .format(pkey_file))\n break\n except paramiko.SSHException:\n if logger:\n logger.debug('Private key file ({0}) could not be loaded '\n 'as type {1} or bad password'\n .format(pkey_file, pkey_class))\n return ssh_pkey", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def cred_extract(self, rp_id: str, credential_id: bytes) -> Optional[ec.EllipticCurvePrivateKey]:\n rp_id_hash = sha256(rp_id.encode())\n\n aesgcm = AESGCM(self.master_key)\n aad = rp_id_hash\n nonce = sha256(aad + self.master_key)[4:16]\n\n try:\n data = aesgcm.decrypt(nonce, credential_id, aad)\n return ec.derive_private_key(int.from_bytes(data, 'big'), ec.SECP256R1(), default_backend())\n except cryptography.exceptions.InvalidTag:\n return None", "def ft_seal_and_unseal():\n print \"generating key pair\"\n pubkey_pem, privkey_pem = api.generate_key_pair( 4096 )\n \n sealed_buf = create_sealed_and_signed_blob( privkey_pem, \"foo\", \"hello world\")\n print \"sealed data is:\\n\\n%s\\n\\n\" % sealed_buf\n\n buf = verify_and_unseal_blob( pubkey_pem, \"foo\", sealed_buf )\n print \"unsealed data is: \\n\\n%s\\n\\n\" % buf", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "def test_set_private_key(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption(private_key=self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def read_chain_pair(private_key, certificates):\n with open(private_key, 'rb') as f:\n private_key = f.read()\n with open(certificates, 'rb') as f:\n certificates = f.read()\n return (private_key, certificates)", "def verify_and_unseal_blob( public_key_pem, secret, blob_data ):\n\n # verify it \n rc, sealed_data = syndicate_crypto.verify_and_parse_json( public_key_pem, blob_data )\n if rc != 0:\n logger.error(\"Failed to verify and parse blob, rc = %s\" % rc)\n return None\n\n logger.info(\"Unsealing credential data\")\n\n rc, data = c_syndicate.password_unseal( sealed_data, secret )\n if rc != 0:\n logger.error(\"Failed to unseal blob, rc = %s\" % rc )\n return None\n\n return data", "def _get_pubkey_from_pem_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_pem_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def load_private_key_der(self, private_key_der):\n return self.load_private_key(SigningKey.from_der(private_key_der))", "def test_rsa_key(self):\n key1 = generate_private_key(u'rsa')\n self.assertIsInstance(key1,rsa.RSAPrivateKey)\n key2 = generate_private_key(u'rsa')\n self.assertIsInstance(key2, rsa.RSAPrivateKey)\n self.assertNotEqual(\n key1.public_key().public_numbers(),\n key2.public_key().public_numbers()\n )", "def parse_config(self, data):\n 
match = re.search(\"-----BEGIN RSA PRIVATE KEY-----.*\" + \\\n \"-----END RSA PRIVATE KEY-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Private key not found\")\n key = match.group()\n\n match = re.search(\"-----BEGIN CERTIFICATE-----.*\" + \\\n \"-----END CERTIFICATE-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Certificate not found\")\n cert = match.group()\n # config also contains allowed, dns, but we don't use that for GCMU\n return (cert, key)", "def decrypt(self, encBytes):\r\n if not self.hasPrivateKey():\r\n raise AssertionError()\r\n if len(encBytes) != numBytes(self.n):\r\n return None\r\n c = bytesToNumber(encBytes)\r\n if c >= self.n:\r\n return None\r\n m = self._rawPrivateKeyOp(c)\r\n decBytes = numberToByteArray(m, numBytes(self.n))\r\n #Check first two bytes\r\n if decBytes[0] != 0 or decBytes[1] != 2:\r\n return None\r\n #Scan through for zero separator\r\n for x in range(1, len(decBytes)-1):\r\n if decBytes[x]== 0:\r\n break\r\n else:\r\n return None\r\n return decBytes[x+1:] #Return everything after the separator\r", "def get_rsa_asymn_keys(public_exponent = 65537, key_size = 2048, bc = backend):\n\tprivate_key = asymmetric.rsa.generate_private_key(public_exponent = public_exponent, key_size = key_size, backend = bc)\n\treturn private_key,private_key.public_key()", "def kem_decapsulate(self, encapsulated_key, private_key):\n d_encapsulated_key = Data(encapsulated_key)\n shared_key = Buffer(self.kem_shared_key_len(key=private_key))\n status = self._lib_vscf_ecc.vscf_ecc_kem_decapsulate(self.ctx, d_encapsulated_key.data, private_key.c_impl, shared_key.c_buffer)\n VscfStatus.handle_status(status)\n return shared_key.get_bytes()", "def tls_certificate_private_key_pem_path(tls_certificate):\n with tls_certificate.private_key_pem.tempfile() as cert_key_pem:\n yield cert_key_pem", "def _get_decryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def get_new_key() -> rsa.RSAPrivateKeyWithSerialization:\n\n return rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048\n )", "def parsePEMKey(s, private=False, public=False, passwordCallback=None,\r\n implementations=[\"openssl\", \"python\"]):\r\n for implementation in implementations:\r\n if implementation == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n key = OpenSSL_RSAKey.parse(s, passwordCallback)\r\n break\r\n elif implementation == \"python\":\r\n key = Python_RSAKey.parsePEM(s)\r\n break\r\n else:\r\n raise ValueError(\"No acceptable implementations\")\r\n\r\n return _parseKeyHelper(key, private, public)", "def convert_key_to_pem ( key_filename, output_filename ) :\n cmd = 'openssl rsa -in ' + key_filename + ' -outform PEM -out ' + output_filename\n return subprocess.call( cmd, shell = True )", "def load_private(file):\n with open(file, \"rb\") as pemfile:\n key = jwk.JWK.from_pem(pemfile.read())\n\n logging.info('Loaded private key from {}'.format(file))\n return key", "def decrypt(data, key):\n data = six.ensure_binary(data)\n try:\n data = privy.peek(hidden=data, password=key)\n except ValueError:\n error = \"Unable to decrypt {cnt} bytes of data using key {k}, invalid key!\"\n error = error.format(cnt=len(data), k=key)\n raise exceptions.ModuleError(error)\n return six.ensure_text(data)", "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # 
typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def deserialize(self, data):\n assert self._cert_store is not None\n try:\n data = self._deserialize(data)\n signature = b64decode(data[\"signature\"])\n signer = data[\"signer\"]\n data = data[\"data\"]\n self._cert_store[signer].verify(data, signature, self._digest)\n return self._deserialize(data)\n except Exception, exc:\n raise SecurityError(\"Unable to deserialize: %r\" % (exc, ))", "def rsa_string_to_privatekey(mystr):\r\n if len(mystr.split()) != 3:\r\n raise ValueError, \"Invalid private key string\"\r\n \r\n return {'d':long(mystr.split()[0]), 'p':long(mystr.split()[1]), 'q':long(mystr.split()[2])}", "def decrypt(self, cypher):\n\n if self.crypt_private == \"\":\n raise ValueError(\"Error decrypting: No private encryption key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def read_keypair(priv_key_file, public_key_file):\n key_pair = {}\n with open(priv_key_file) as f:\n key_data = f.read()\n f.close()\n key_pair[\"key\"] = key_data\n with open(public_key_file) as f:\n pub_data = f.read()\n f.close()\n key_pair[\"pub\"] = pub_data\n for i in [priv_key_file, public_key_file]:\n os.remove(i)\n return key_pair", "def _load_private_key(self, filename, keytype=None):\n type_map = {\n 'dsa': ssh.DSSKey,\n 'rsa': ssh.RSAKey}\n\n if keytype is None:\n with open(filename, 'rb') as k:\n keydata = k.read()\n \n m = re.search(\"BEGIN (.*?) 
PRIVATE KEY\", keydata)\n if m:\n keytype = m.group(1)\n\n keycls = type_map.get(keytype.lower(), 'dsa')\n\n try:\n key = keycls.from_private_key_file(filename)\n log.debug(\"Loaded key '%s' without password.\", filename)\n except ssh.PasswordRequiredException:\n passphrase = self.config.get('passphrase')\n \n if callable(passphrase):\n passphrase = passphrase(filename,\n self.config.get('remote_host', 'localhost'),\n self.config.get('username', getpass.getuser()))\n if passphrase is None:\n return\n\n if not passphrase:\n passphrase = getpass.getpass(\"Key passphrase: \")\n \n key = keycls.from_private_key_file(filename, passphrase)\n\n return key", "def find_private_key(self):\n\t\tp, q = self.find_hidden_primes()\n\t\tself.private_key = self.calculate_private_key(p, q)\n\t\treturn self.private_key", "def rsa_private_key_pkcs1_to_pkcs8(pkcs1_key):\n algorithm = RsaAlgorithmIdentifier()\n algorithm[\"rsaEncryption\"] = RSA_ENCRYPTION_ASN1_OID\n\n pkcs8_key = PKCS8PrivateKey()\n pkcs8_key[\"version\"] = 0\n pkcs8_key[\"privateKeyAlgorithm\"] = algorithm\n pkcs8_key[\"privateKey\"] = pkcs1_key\n\n return encoder.encode(pkcs8_key)", "def generate_private_key(self):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key generation.\")\n return self.load_private_key(SigningKey.generate(curve=self.curve))", "def fromPrivkey(cls, ecdsaPrivkey):\n rawPrivkey = cls.PRIVATE_KEY_PREFIX + ecdsaPrivkey.to_string()\n privkey = b2a_hashed_base58(rawPrivkey)\n\n ecdsaPubkey = ecdsaPrivkey.get_verifying_key()\n rawPubkey = cls.PUBLIC_KEY_PREFIX + hash160(\n \"\\x04\" + ecdsaPubkey.to_string())\n pubkey = b2a_hashed_base58(rawPubkey)\n\n return cls(pubkey, privkey)", "def get_rsa_server_keys() -> Tuple[keys.Key, keys.Key]:\n try:\n # Load existing keys\n return keys.Key.fromFile('host_rsa'), keys.Key.fromFile('host_rsa.pub')\n\n except (FileNotFoundError, keys.BadKeyError):\n # Keys need to be generated.\n private_key, public_key = generate_rsa_server_keys()\n logger.info(\"New server keys were generated.\")\n\n return (\n keys.Key.fromString(private_key, type=\"PRIVATE_OPENSSH\"),\n keys.Key.fromString(public_key)\n )", "def check_equal_rsa_priv_key(sk2_priv, sk_priv):\n pri_n = sk_priv.private_numbers()\n pri_n2 = sk2_priv.private_numbers()\n\n # the library guarantees this: p is the larger factor\n self.assertTrue(pri_n.p > pri_n.q)\n\n self.assertTrue(\n pri_n2.p == pri_n.p and\n pri_n2.q == pri_n.q and\n pri_n2.d == pri_n.d and\n pri_n2.dmp1 == pri_n.dmp1 and\n pri_n2.dmq1 == pri_n.dmq1 and\n pri_n2.iqmp == pri_n.iqmp)", "def _decode_credential_from_json(self, cred_entry):\n raw_key = cred_entry['key']\n key = util.dict_to_tuple_key(raw_key)\n credential = None\n credential = Credentials.new_from_json(\n json.dumps(cred_entry['credential']))\n return (key, credential)", "def get_rsa_key_pair() -> Tuple[str, Optional[str]]:\n if RSA_PUBLIC_KEY_PATH is not None:\n # Read public key.\n with open(RSA_PUBLIC_KEY_PATH) as f_obj:\n public_key = f_obj.read()\n\n # Read private key if given.\n private_key = None\n if RSA_PRIVATE_KEY_PATH is not None:\n with open(RSA_PRIVATE_KEY_PATH) as f_obj:\n private_key = f_obj.read()\n\n return (public_key, private_key)\n\n if RSA_PUBLIC_KEY is not None:\n return (RSA_PUBLIC_KEY, RSA_PRIVATE_KEY)\n\n return create_rsa_key_pair()", "def decrypt_kms_data(encrypted_data):\n if not AWS_REGION:\n return\n\n kms = boto3.client('kms', region_name=AWS_REGION)\n\n decrypted = kms.decrypt(CiphertextBlob=encrypted_data)\n\n if decrypted.get('KeyId'):\n # 
Decryption succeed\n decrypted_value = decrypted.get('Plaintext', '')\n if isinstance(decrypted_value, bytes):\n decrypted_value = decrypted_value.decode('utf-8')\n return decrypted_value" ]
[ "0.72001606", "0.61105424", "0.59807235", "0.59552276", "0.5781235", "0.5733041", "0.5708165", "0.56828797", "0.5617318", "0.56119716", "0.55409443", "0.55051327", "0.54653615", "0.54501706", "0.5382498", "0.53593355", "0.53350115", "0.5326151", "0.5310217", "0.5288465", "0.52867794", "0.5273756", "0.526497", "0.52596426", "0.52040935", "0.51865596", "0.5159534", "0.51291484", "0.51176167", "0.5079574", "0.50689745", "0.5060514", "0.50370145", "0.5032764", "0.50004673", "0.49883863", "0.49862486", "0.493617", "0.49293116", "0.49151966", "0.48922145", "0.48888606", "0.4877124", "0.48710045", "0.4867854", "0.48402745", "0.4821996", "0.48135397", "0.48133305", "0.48022518", "0.47968635", "0.47867218", "0.47786677", "0.47712398", "0.47694516", "0.47687078", "0.47588474", "0.47511247", "0.47498107", "0.47457927", "0.47398332", "0.47358546", "0.47163942", "0.47159857", "0.47096208", "0.47054598", "0.4700774", "0.46720254", "0.4661968", "0.46551847", "0.46501565", "0.46480548", "0.46268082", "0.4621427", "0.462133", "0.4619442", "0.46112078", "0.4610536", "0.4600102", "0.45962372", "0.45735115", "0.45734084", "0.45627627", "0.45597577", "0.45485637", "0.45472935", "0.4536798", "0.4534183", "0.4516212", "0.45148802", "0.45140266", "0.45086718", "0.45044816", "0.4499838", "0.447802", "0.4473188", "0.44638476", "0.44635487", "0.44525355", "0.4442126" ]
0.82866263
0
Returns a fingerprint used for correlating public keys and private keys
def _fingerprint(key_object, load_private_key): if isinstance(key_object, PrivateKeyInfo): key = key_object['private_key'].parsed if key_object.algorithm == 'rsa': to_hash = '%d:%d' % ( key['modulus'].native, key['public_exponent'].native, ) elif key_object.algorithm == 'dsa': params = key_object['private_key_algorithm']['parameters'] public_key = Integer(pow( params['g'].native, key_object['private_key'].parsed.native, params['p'].native )) to_hash = '%d:%d:%d:%d' % ( params['p'].native, params['q'].native, params['g'].native, public_key.native, ) elif key_object.algorithm == 'ec': public_key = key['public_key'].native if public_key is None: # This is gross, but since the EC public key is optional, # and we need to load the private key and use the crypto lib # to get the public key, we have to import the platform-specific # asymmetric implementation. This is the reason a bunch of the # imports are module imports, so we don't get an import cycle. public_key_object = load_private_key(key_object).public_key public_key = public_key_object.asn1['public_key'].parsed.native to_hash = '%s:' % key_object.curve[1] to_hash = to_hash.encode('utf-8') to_hash += public_key if isinstance(to_hash, str_cls): to_hash = to_hash.encode('utf-8') return hashlib.sha256(to_hash).digest() if isinstance(key_object, PublicKeyInfo): if key_object.algorithm == 'rsa': key = key_object['public_key'].parsed to_hash = '%d:%d' % ( key['modulus'].native, key['public_exponent'].native, ) elif key_object.algorithm == 'dsa': key = key_object['public_key'].parsed params = key_object['algorithm']['parameters'] to_hash = '%d:%d:%d:%d' % ( params['p'].native, params['q'].native, params['g'].native, key.native, ) elif key_object.algorithm == 'ec': public_key = key_object['public_key'].native to_hash = '%s:' % key_object.curve[1] to_hash = to_hash.encode('utf-8') to_hash += public_key if isinstance(to_hash, str_cls): to_hash = to_hash.encode('utf-8') return hashlib.sha256(to_hash).digest() raise ValueError(pretty_message( ''' key_object must be an instance of the asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo classes, not %s ''', type_name(key_object) ))
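A minimal usage sketch (an illustration, not part of this record's source code) of how the _fingerprint() helper above could correlate a PKCS#8 private key with the public key inside an X.509 certificate. It assumes the asn1crypto package is available; file names are placeholders, and the load_private_key callback may be None for RSA and DSA keys since it is only consulted for EC private keys that omit the public_key field.

from asn1crypto import keys, pem, x509

def _load_asn1(path, spec):
    # Read a file and unarmor it if it is PEM; return the parsed ASN.1 structure.
    with open(path, 'rb') as f:
        data = f.read()
    if pem.detect(data):
        _, _, data = pem.unarmor(data)
    return spec.load(data)

# Placeholder file names; the key is assumed to be a PKCS#8 PrivateKeyInfo structure.
private_info = _load_asn1('server.key', keys.PrivateKeyInfo)
cert = _load_asn1('server.crt', x509.Certificate)
public_info = cert['tbs_certificate']['subject_public_key_info']

# Matching fingerprints indicate the private key and certificate form a pair.
if _fingerprint(private_info, None) == _fingerprint(public_info, None):
    print('Private key matches the certificate')
else:
    print('Private key does NOT match the certificate')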
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fingerprint(public_key):\r\n\r\n return hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]", "def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return fingerprint_public_key_blob(blob)", "def fingerprint(self, key):\n base64_pub = self.base64_pub_encode(key)\n return SHA256.new(base64_pub.encode('utf-8')).digest()", "def fingerprint(self):\n\n if self._fingerprint is None:\n self._fingerprint = _fingerprint(self.asn1, None)\n return self._fingerprint", "def get_public_key_fingerprint(curve: object, temp_public_key: object) \\\n -> object:\n\n vk = VerifyingKey.from_string(bytes.fromhex(temp_public_key), curve=curve)\n\n uncompressed_pub_key = vk.to_string('uncompressed')\n\n pub_key_hash_fingerprint = hashlib.sha256(uncompressed_pub_key)\n\n return pub_key_hash_fingerprint.hexdigest()", "def fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"fingerprint\")", "def _gpg_fingerprints(self) -> List[str]:\n return self._gpg_keys.fingerprints", "def fingerprint(self):\n return self.gpg.list_keys()[0]['fingerprint']", "def fingerprint(self) -> str:\n fp = self.sha256.hex()\n return fp", "def host_key_fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"host_key_fingerprint\")", "def fingerprint_public_key_blob(blob):\n hash = sha256(blob).digest()\n encoded = b64encode(hash).decode('UTF-8').rstrip('=')\n return 'SHA256:{}'.format(encoded)", "def fingerprint(self, algorithm):", "def fingerprint(self):\n return self.identifier[:4]", "def fingerprint(publicKeyN, publicKeyE=65537L):\n asn1Str = encoder.encode(univ.Sequence().setComponentByPosition(0, univ.Integer(publicKeyN)).setComponentByPosition(1, univ.Integer(publicKeyE)))\n hashString = hashlib.sha1(asn1Str).digest()\n hexlifiedHash = binascii.hexlify(hashString)\n return hexlifiedHash.upper()", "def fingerprint_from_keybase(fingerprint, kb_obj):\n if 'public_keys' in kb_obj and \\\n 'pgp_public_keys' in kb_obj['public_keys']:\n for key in kb_obj['public_keys']['pgp_public_keys']:\n keyprint = fingerprint_from_var(key).lower()\n fingerprint = fingerprint.lower()\n if fingerprint == keyprint or \\\n keyprint.startswith(fingerprint) or \\\n keyprint.endswith(fingerprint):\n return {\n 'fingerprint': keyprint,\n 'bundle': key\n }\n\n return None", "def get_fingerprint(self):\n return self.fp", "def calculate_key_signature(public_key: str) -> str:\n rsa_obj = RSA.import_key(public_key)\n rsa_der = rsa_obj.export_key(\"DER\")\n\n hasher = SHA1.new()\n hasher.update(rsa_der)\n fingerprint = base64url_encode(hasher.digest())\n\n return fingerprint.decode(\"utf8\")", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def getFingerprint(self):\r\n if self.getNumCerts() == 0:\r\n raise AssertionError()\r\n return self.x509List[0].getFingerprint()", "def get_fingerprint(filepath):\n ssh_file = open(filepath, 'r')\n ssh_file_contents = ssh_file.readlines()\n ssh_fingerprint = ''.join(ssh_file_contents).strip()\n\n return ssh_fingerprint", "def fingerprint(keyed_data, digest_size=16):\n h = blake2b(digest_size=16)\n for key in sorted(keyed_data.keys()):\n val = keyed_data[key]\n s = json.dumps(val, sort_keys=True, cls=NpEncoder).encode()\n h.update(s)\n return h.hexdigest()", "def convert_to_fingerprint(s):\n\n\ttry:\n\t\t# Convert SMILES to Molecule object\n\t\tmolecule = 
Chem.MolFromSmiles(s)\n\t\t# Get MACCS Key from Molecule object\n\t\tmaccs_key = MACCSkeys.GenMACCSKeys(molecule)\n\t\treturn maccs_key.ToBitString()\n\texcept:\n\t\treturn None", "def fingerprint(self):\n return self.pod.hash_file(self.pod_path)", "def _certificate_fingerprint(identity):\n fingerprint, stderr = _check_output([\n \"openssl\",\n \"x509\",\n \"-inform\",\n \"DER\",\n \"-noout\",\n \"-fingerprint\",\n ],\n inputstr=identity)\n fingerprint = fingerprint.strip()\n fingerprint = fingerprint.replace(\"SHA1 Fingerprint=\", \"\")\n fingerprint = fingerprint.replace(\":\", \"\")\n return fingerprint", "def compute_fingerprint(self):\n\t\t# initialize tensorflow session\n\t\tsess = tf.InteractiveSession()\n\t\tclf_image = wb_clf(config.clf_image_filename, reshape_size=None, input_dim=(100,200), \n\t\t\t\t\t\t\tname=\"image\", ALEXNET=True)\n\t\tclf_blot = wb_clf(config.clf_blot_filename, input_dim=(15,30), reshape_size=4*8*64, name=\"blot\")\n\t\tsess.run(tf.global_variables_initializer())\n\t\t# create western blot fingerprinting object\n\t\tWB = WesternBlot(clf_image=clf_image, clf_blot=clf_blot)\n\t\tWB.figure = self.figure\n\t\tWB.figure_gray = cv2.cvtColor(self.figure, cv2.COLOR_BGR2GRAY)\n\t\t# compute fingerprint\n\t\tWB.westernBlotExtractor(VISUALIZE=False)\n\t\tself.local_database = WB.Fingerprint", "def get_fingerprint(md5=False):\n sb = []\n sb.append(p.node())\n sb.append(p.architecture()[0])\n sb.append(p.architecture()[1])\n sb.append(p.machine())\n sb.append(p.processor())\n sb.append(p.system())\n sb.append(str(uuid.getnode())) # MAC address\n text = '#'.join(sb)\n if md5:\n return string_to_md5(text)\n else:\n return text", "def get_fingerprints(self, jid: JID) -> List[str]:\n return []", "def fingerprint_from_file(filename):\n cmd = flatten([gnupg_bin(), gnupg_home(), filename])\n outp = stderr_output(cmd).split('\\n')\n if not outp[0].startswith('pub'):\n raise CryptoritoError('probably an invalid gpg key')\n\n return outp[1].strip()", "def fingerprint(self, fingerprint_hash=None):\n try:\n fd, name = tempfile.mkstemp(prefix='sshkey-')\n with open(name, 'w') as fd:\n fd.write('{}'.format(self.line))\n if fingerprint_hash:\n p = Popen(('ssh-keygen', '-E', fingerprint_hash, '-lf', name), stdin=PIPE, stdout=PIPE, stderr=PIPE)\n else:\n p = Popen(('ssh-keygen', '-lf', name), stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = [str(v, 'utf-8') for v in p.communicate()]\n if p.returncode != 0:\n raise SSHKeyError('Error running ssh-keygen: returns {}'.format(p.returncode))\n os.unlink(name)\n return stdout.rstrip().split()[1].split(':', 1)[1]\n except Exception as e:\n raise SSHKeyError('Error getting fingerprint for {}: {}'.format(self.line, e))", "def get_short_fingerprint(length=6):\n assert 6 <= length <= 32\n #\n return get_fingerprint(md5=True)[-length:]", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def host_key_fingerprint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key_fingerprint\")", "def ssl_fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ssl_fingerprint\")", "def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"", "def get_fingerprints(fp, format = None):\n\n spectrum, axis_freqs, axis_times = do_fft(*get_raw(fp, format))\n peaks = find_peaks(spectrum, axis_freqs, axis_times)\n fingerprints = 
calculate_fingerprints(peaks)\n\n return fingerprints", "def hash(self):\r\n sign_map = AutoVivification()\r\n digest = lambda x: self.__polynomial_hash(x)\r\n # We are only doing signatures for top levels\r\n for k, v in self.iteritems():\r\n # Digested value of the string representation of \r\n # what is behind.\r\n tmp = str(v)\r\n # Removed non meaningful information from the content.\r\n # No capital L is ever used in the register namings, so it is safe to strip that too.\r\n tmp = tmp.strip().replace('{','').replace('}','').replace(':','').replace(' ','').replace('L','')\r\n value = digest(tmp)\r\n sign_map[k] = string.atoi(value, 16)\r\n \r\n return sign_map", "def key_pair_finger_print(self) -> str:\n return pulumi.get(self, \"key_pair_finger_print\")", "def _fingerprint(self):\n hasher = hashlib.md5()\n source = inspect.getsource(self._func)\n hasher.update(source.encode('utf-8'))\n\n return hasher.hexdigest()", "def public_key(self):", "def fingerprint_from_var(var):\n vsn = gpg_version()\n cmd = flatten([gnupg_bin(), gnupg_home()])\n if vsn[0] >= 2 and vsn[1] < 1:\n cmd.append(\"--with-fingerprint\")\n\n output = polite_string(stderr_with_input(cmd, var)).split('\\n')\n if not output[0].startswith('pub'):\n raise CryptoritoError('probably an invalid gpg key')\n\n if vsn[0] >= 2 and vsn[1] < 1:\n return output[1] \\\n .split('=')[1] \\\n .replace(' ', '')\n\n return output[1].strip()", "def get_ssh_fingerprint(request, ip_address):\n try:\n ssh_fingerprint = usm_wrapper_utils.get_host_ssh_key(ip_address)\n except Exception, e:\n log.exception(e)\n return Response(\n {'message': 'Error while getting fingerprint'}, status=417)\n\n return Response({'ssh_key_fingerprint': ssh_fingerprint[0]}, status=200)", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def get_fingerprint(self, md='md5'):\n der = self.as_der()\n md = EVP.MessageDigest(md)\n md.update(der)\n digest = md.final()\n return hex(util.octx_to_num(digest))[2:-1].upper()", "def fingerprint(self) -> Text:\n return self.name", "def format_fingerprint(fpr):\n count = 0\n fingerprint = ''\n chunks = [i for i in re.split('([A-F0-9]{4})', fpr) if i]\n for chunk in chunks:\n count += 1\n fingerprint += ' %s' % chunk\n if count == len(chunks)/2:\n fingerprint += ' '\n\n return fingerprint.strip()", "def recipient_public_key(self):", "def get_fingerprint():\n print('Requesting fingerprint...')\n config = {\n 'host': os.environ.get('ACR_HOST'),\n 'access_key': os.environ.get('ACR_ACCESS_KEY'), \n 'access_secret': os.environ.get('ACR_ACCESS_SECRET'),\n 'timeout': 10\n }\n recognizer = ACRCloudRecognizer(config)\n mp3_path = helpers.get_mp3_output_path()\n start_seconds = 0\n rec_length = helpers.get_recording_length_seconds()\n result = json.loads(recognizer.recognize_by_file(mp3_path, start_seconds, rec_length))\n save_fingerprint_result_to_file(result)\n if int(result['status']['code']) == 0:\n try:\n song = {\n 'name': get_song_name_from_result(result),\n 'id': get_spotify_id_from_result(result),\n 'artist': get_first_artist_from_result(result),\n 'seconds_remaining': get_song_seconds_remaining(result),\n 'percent_remaining': get_song_percent_remaining(result),\n }\n print('Song Found: {}'.format(song['name']))\n return song\n except KeyError:\n pass\n return None", "def fingerprint():\n files = (glob.glob(base_dir + '**/*.html') +\n glob.glob(base_dir + '*.html') +\n glob.glob(base_dir + 'core.js'))\n\n md5s = OrderedDict()\n\n for fil in sorted(files):\n name = 
fil[len(base_dir):]\n with open(fil) as fp:\n md5 = hashlib.md5(fp.read().encode('utf-8')).hexdigest()\n md5s[name] = md5\n\n template = \"\"\"\\\"\\\"\\\"DO NOT MODIFY. Auto-generated by script/fingerprint_frontend.\\\"\\\"\\\"\n\nFINGERPRINTS = {}\n\"\"\"\n\n result = template.format(json.dumps(md5s, indent=4))\n\n with open(fingerprint_file, 'w') as fp:\n fp.write(result)", "def get_pub_key(self):\n return \"RSA {0}\".format(self._cert.get_pubkey().bits)", "def generateFingerprint(molecule, numInts=32, pathLength=7):\n \n paths = LinearPaths.generatePaths(molecule, maxdepth=pathLength)\n fp = Fingerprint.Fingerprint(numIntegers=numInts)\n\n for path in paths:\n fp.addPath(path)\n\n return fp", "def hashdict(self):\n return {\n 'pix': super(cfft, self).hashdict(),\n 'fft': hashlib.sha1(self.fft.view(np.uint8)).hexdigest()\n }", "def get_shared_key(public, private, p):\n s = pow(public, private, p)\n s_hex = hex(s)[2:]\n # Make the length of s_hex a multiple of 2\n if len(s_hex) % 2 != 0:\n s_hex = '0' + s_hex\n # Convert hex to bytes\n s_bytes = binascii.unhexlify(s_hex)\n # Hash and return the hex result\n return sha256(s_bytes).digest()", "def get_public_key(self, uid: str) -> str:\n return self.context.get(\n \"/dsum/public_key/%s\" % uid, None, \"DSum: failed retrieving the Curve 25519 private key with uid: %s\" % uid)['key']", "def PublicKey(self) -> _n_9_t_1:", "def PublicKey(self) -> _n_9_t_1:", "def get_commit_signer_fingerprint(commit: git.Commit) -> str:\n return CommitVerifier._get_commit_info(commit, pretty_format=\"%GF\")", "def fingerprint(self, file):\n raise NotImplementedError()", "def request_fingerprint(req: HttpRequest) -> str:\n fp = sha1()\n fp.update(req.method.encode() + b\"\\n\")\n fp.update(canonicalize_url(str(req.url)).encode() + b\"\\n\")\n for name, value in sorted(req.headers.items()):\n fp.update(f\"{name.title()}:{value}\\n\".encode())\n fp.update(b\"\\n\")\n fp.update(req.body)\n return fp.hexdigest()", "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def call_fingerprint(cls, args, kwargs):\n return hasher(getcallargs(cls.__init__, None, *args, **kwargs))", "def get_key_id(self):\n jwk_data = {\n \"crv\": \"P-256\",\n \"kty\": \"EC\",\n \"x\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().x.to_bytes(32, \"big\")).decode().replace(\"=\", \"\"),\n \"y\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().y.to_bytes(32, \"big\")).decode().replace(\"=\", \"\")\n }\n jwk = json.dumps(jwk_data, separators=(',', ':'))\n return hashlib.sha256(jwk.encode()).digest()", "def alt_stubbed_receiver() -> PublicKey:\n return PublicKey(\"J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i98\")", "def key_for_signature(self, data, sig):\n verification = self.verify(data, sig)\n return PublicKey.objects.filter(\n fingerprint=verification.fingerprint,\n profile__verified=True,\n ).first()", "def get_private_key(self) -> str:\n raise NotImplementedError(\"Please implement your own get_public_key() method\")", "def public_key(self):\n return f'PublicKey = {self._peer.public_key}'", "def get_public_key(self):\n return self.private_key.get_verifying_key()", "def hash_key(self):", "def private_key(self):", "def get_hostfingerprint_list(self):\n return self.hostfingerprint", "def key_from_keybase(username, fingerprint=None):\n url = keybase_lookup_url(username)\n 
resp = requests.get(url)\n if resp.status_code == 200:\n j_resp = json.loads(polite_string(resp.content))\n if 'them' in j_resp and len(j_resp['them']) == 1:\n kb_obj = j_resp['them'][0]\n if fingerprint:\n return fingerprint_from_keybase(fingerprint, kb_obj)\n else:\n if 'public_keys' in kb_obj \\\n and 'pgp_public_keys' in kb_obj['public_keys']:\n key = kb_obj['public_keys']['primary']\n return massage_key(key)\n\n return None", "def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key", "def generate_symmetric_key():\n return Fernet.generate_key()", "def get_public_key(self) -> str:\n raise NotImplementedError(\"Please implement your own get_public_key() method\")", "def _show_fingerprints(self, jid: JID) -> None:\n fprs = self.get_fingerprints(jid)\n if len(fprs) == 1:\n self.api.information(\n 'Fingerprint for %s: %s' % (jid, fprs[0]),\n 'Info',\n )\n elif fprs:\n self.api.information(\n 'Fingerprints for %s:\\n\\t%s' % (jid, '\\n\\t'.join(fprs)),\n 'Info',\n )\n else:\n self.api.information(\n 'No fingerprints to display',\n 'Info',\n )", "def get_fingerprint_info_ext(fingerprint):\n extension = _FINGERPRINT_NOT_FOUND % fingerprint\n return extension", "def compute_fingerprint(path_list):\n\n hasher = hashlib.sha1()\n\n for path_item in path_list:\n\n # For directories, create a hash based on the modification times\n # of first-level subdirectories\n if os.path.isdir(path_item):\n for dirname in sorted(os.listdir(path_item)):\n path_name = os.path.join(path_item, dirname)\n if os.path.isdir(path_name):\n hasher.update(str(os.stat(path_name).st_mtime).encode('utf-8'))\n\n # For files, hash the contents of the file\n if os.path.isfile(path_item):\n with open(path_item, \"rb\") as file_handle:\n hasher.update(file_handle.read())\n\n return hasher.hexdigest()", "def gen_Fernet_key():\n\tkey = Fernet.generate_key()\n\treturn key", "def _GetServerKey(self, peer_id):\n return hashlib.sha224(peer_id + self.network_id).hexdigest()", "def _save_fingerprint(self):\n path = os.path.join(self._cache_path, '%s.fingerprint' % self._name)\n\n if not os.path.exists(self._cache_path):\n os.makedirs(self._cache_path)\n\n with open(path, 'w') as f:\n f.write(self._fingerprint())", "def _load_fingerprint(self):\n path = os.path.join(self._cache_path, '%s.fingerprint' % self._name)\n\n if not os.path.exists(path):\n return None\n\n with open(path) as f:\n fingerprint = f.read()\n\n return fingerprint", "def generate_hash(self):\n if not self.public_key:\n raise ValueError('Requires a public publicKey')\n return self.public_key.encode(encoding='bytes')", "def song_fp(samples, prnt_spectro=False):\n\n t, f = peak_find(samples, prnt_spectro)\n\n fingerprints = set()\n for n, t1 in enumerate(t) :\n for j, t2 in enumerate(t[min(n + 1, len(t) - 1) : min(n + 20, len(t))]) :\n if n + j + 1 < len(t) :\n fingerprints.add((f[n], f[n + j + 1], t2 - t1))\n\n return frozenset(fingerprints)", "def ssl_fingerprint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ssl_fingerprint\")", "def fingerprint(path, fs_options={}, fs=None):\n fs, path = parse(path, fs_options, fs=fs)\n path = stringyfy(path)\n if fs is None:\n mtime = os.path.getmtime(path)\n size = os.path.getsize(path)\n else:\n info = fs.get_file_info([path])[0]\n mtime = info.mtime_ns\n size = info.size\n import vaex.cache\n return 
vaex.cache.fingerprint(('file', (path, mtime, size)))", "def stubbed_receiver() -> PublicKey:\n return PublicKey(\"J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i99\")", "def compute_fingerprint(path_list):\r\n\r\n hasher = hashlib.sha1()\r\n\r\n for path in path_list:\r\n\r\n # For directories, create a hash based on the modification times\r\n # of first-level subdirectories\r\n if os.path.isdir(path):\r\n for dirname in sorted(os.listdir(path)):\r\n p = os.path.join(path, dirname)\r\n if os.path.isdir(p):\r\n hasher.update(str(os.stat(p).st_mtime))\r\n\r\n # For files, hash the contents of the file\r\n if os.path.isfile(path):\r\n with open(path, \"rb\") as file_handle:\r\n hasher.update(file_handle.read())\r\n\r\n return hasher.hexdigest()", "def make_public_key(prime, base, rnumber):\n\n pub_key = (base ** rnumber) % prime\n return pub_key", "def get_ssh_pkey(self, record):\n return 'A REMPLIR '", "def hash_str(self):\n return '___'.join([self.key.kind(), self.key.string_id(),\n self._Hash()])", "def private_key_to_public_key(private_key):\n\tpk = PrivateKey().fromString(bytes.fromhex(private_key))\n\treturn '04' + pk.publicKey().toString().hex().upper()", "def GetName(self):\r\n return 'RSA-SHA256'", "def public_key(self):\n keyfile = self._get_field('System', 'keyfile')\n return join(self.key_path, keyfile)", "def generate_key(seed):\n private_key = sha256(seed)\n public_key = privtopub(private_key)\n return {\"private\": private_key, \"public\": public_key}", "async def server_public_key(self) -> bytes:\n raise NotImplementedError", "def gen_key(self):\n\n if len(self.fields) == 0:\n return None\n\n # we do not call self.validate() here as self._id will always be null,\n # so we call self.validator.validate on the schema. This will validate\n # that 'effectiveDate', 'carrier', and 'flightNumber' are not None\n # and of valid data type\n if self.validator.validate(self.fields) == False:\n return None\n\n h = hashlib.md5()\n h.update(self.fields['effectiveDate'].isoformat())\n h.update(str(self.fields['carrier']))\n h.update(str(self.fields['flightNumber']))\n\n return h.hexdigest()", "def public_key(self): # pragma: no cover\n raise NotImplementedError()", "def get_pub_key(priv_key: rsa.RSAPrivateKey) -> rsa.RSAPublicKey:\n return priv_key.public_key()", "def __repr__(self, verbose=False):\r\n rep = \"Private key for n = {n}:\\n\\n\".format(n=self.n)\r\n rep += \"f = {f}\\n\\n\".format(f=self.f)\r\n rep += \"g = {g}\\n\\n\".format(g=self.g)\r\n rep += \"F = {F}\\n\\n\".format(F=self.F)\r\n rep += \"G = {G}\\n\\n\".format(G=self.G)\r\n if verbose:\r\n rep += \"\\nFFT tree\\n\"\r\n rep += print_tree(self.T_fft, pref=\"\")\r\n return rep" ]
[ "0.787024", "0.7284799", "0.72343296", "0.705091", "0.6990618", "0.6967752", "0.6874012", "0.6863513", "0.68291414", "0.6696847", "0.66854084", "0.6662513", "0.6511653", "0.6490654", "0.6474358", "0.64719266", "0.6441411", "0.64376676", "0.63801914", "0.63442427", "0.63356423", "0.6302114", "0.6269299", "0.62295157", "0.6205228", "0.617533", "0.6153148", "0.61451286", "0.6136877", "0.61357933", "0.6107275", "0.6107275", "0.6107275", "0.6054367", "0.60364825", "0.6003217", "0.598746", "0.59623176", "0.59434336", "0.5934576", "0.59328395", "0.5923619", "0.5903781", "0.58960587", "0.58874995", "0.58487916", "0.5783502", "0.5780899", "0.57806844", "0.57776314", "0.577073", "0.57366073", "0.5721635", "0.57177615", "0.57074046", "0.5694961", "0.5694961", "0.56781405", "0.5675862", "0.5675174", "0.5674397", "0.56731266", "0.5669223", "0.5667033", "0.5656112", "0.56496406", "0.5609424", "0.56010455", "0.5593995", "0.55767363", "0.55684304", "0.55485696", "0.55472386", "0.5532312", "0.55228853", "0.55219394", "0.5519971", "0.55181676", "0.5510474", "0.5506008", "0.54942006", "0.549253", "0.5455404", "0.5452278", "0.54511446", "0.54487556", "0.54298055", "0.54294604", "0.5411991", "0.54068655", "0.53937507", "0.5382494", "0.5359381", "0.5359228", "0.5354908", "0.5352821", "0.5347062", "0.53445894", "0.53377056", "0.53322685" ]
0.70989317
3
Loads a public key from a DER or PEM-formatted file. Supports RSA, DSA and EC public keys. For RSA keys, both the old RSAPublicKey and SubjectPublicKeyInfo structures are supported. Also allows extracting a public key from an X.509 certificate.
def parse_public(data): if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) key_type = None # Appears to be PEM formatted if re.match(b'\\s*-----', data) is not None: key_type, algo, data = _unarmor_pem(data) if key_type == 'private key': raise ValueError(pretty_message( ''' The data specified does not appear to be a public key or certificate, but rather a private key ''' )) # When a public key returning from _unarmor_pem has a known algorithm # of RSA, that means the DER structure is of the type RSAPublicKey, so # we need to wrap it in the PublicKeyInfo structure. if algo == 'rsa': return PublicKeyInfo.wrap(data, 'rsa') if key_type is None or key_type == 'public key': try: pki = PublicKeyInfo.load(data) # Call .native to fully parse since asn1crypto is lazy pki.native return pki except (ValueError): pass # Data was not PublicKeyInfo try: rpk = RSAPublicKey.load(data) # Call .native to fully parse since asn1crypto is lazy rpk.native return PublicKeyInfo.wrap(rpk, 'rsa') except (ValueError): pass # Data was not an RSAPublicKey if key_type is None or key_type == 'certificate': try: parsed_cert = Certificate.load(data) key_info = parsed_cert['tbs_certificate']['subject_public_key_info'] return key_info except (ValueError): pass # Data was not a cert raise ValueError('The data specified does not appear to be a known public key or certificate format')
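A short usage sketch (an illustration, not part of the original record): because parse_public() accepts PEM or DER data containing a public key, an RSAPublicKey structure, or a full X.509 certificate, the same call works for any of those inputs and always yields an asn1crypto.keys.PublicKeyInfo object. The file name below is a placeholder.

# Placeholder path; could equally be a .pem/.der public key instead of a certificate.
with open('server.crt', 'rb') as f:
    data = f.read()

public_key_info = parse_public(data)

# PublicKeyInfo exposes the algorithm and key size directly.
print(public_key_info.algorithm)   # 'rsa', 'dsa' or 'ec'
print(public_key_info.bit_size)    # e.g. 2048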
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_public_key(file_path: str, encoding: Encoding = None) -> PublicKey:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(key_data: bytes) -> PublicKey:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param key_data: given public keys data\n :return: loaded public key\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_public_key,\n Encoding.DER: load_der_public_key\n }[real_encoding](key_data, default_backend())\n\n return generic_load(file_path, solve)", "def load_public_key(filename):\n\twith open(str(filename) + \"_pub_key.pem\", \"rb\") as key_file:\n\t\treturn serialization.load_pem_public_key(\n\t\tkey_file.read(),\n\t\tbackend=default_backend()\n\t)", "def import_public_key_from_pem_file(filename):\n with open(filename, \"rb\") as key_file:\n public_key = serialization.load_pem_public_key(key_file.read(), backend=default_backend())\n return public_key", "def rsa_file_to_publickey(filename):\r\n fileobject = file(filename,'r')\r\n publickeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_publickey(publickeystring)", "def import_public_key_from_cert_file(filename):\n with open(filename, \"rb\") as key_file:\n cert = x509.load_pem_x509_certificate(key_file.read(), backend=default_backend())\n return cert.public_key()", "def load_pub_key_bytes(bs: bytes) -> rsa.RSAPublicKey:\n k = serialization.load_pem_public_key(bs)\n assert isinstance(k, rsa.RSAPublicKey)\n return k", "def save_rsa_public_key(public_key: RSAPublicKey, file_path: str, encoding: Encoding = Encoding.PEM) -> None:\n pem_data = public_key.public_bytes(encoding, serialization.PublicFormat.PKCS1)\n with open(file_path, 'wb') as f:\n f.write(pem_data)", "def import_public_key_from_pem_data(pem_data):\n if not pem_data.startswith(PREFIX):\n pem_data = bytes(\"{}\\n{}\\n{}\".format(PREFIX, pem_data, POSTFIX), \"utf-8\")\n else:\n pem_data = bytes(pem_data, \"utf-8\")\n cert = x509.load_pem_x509_certificate(pem_data, default_backend())\n return cert.public_key()", "def _LoadSshPublicKey(ssh_public_key_path):\n key_path = os.path.expanduser(ssh_public_key_path)\n if not os.path.exists(key_path):\n raise errors.DriverError(\n \"SSH public key file %s does not exist.\" % key_path)\n\n with open(key_path) as f:\n rsa = f.read()\n rsa = rsa.strip() if rsa else rsa\n utils.VerifyRsaPubKey(rsa)\n return rsa", "def _get_pubkey_from_der_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_der_public_key(filedata, backend=backend), None\n except Exception:\n return None, None", "def get_public_key(self):\n# _log.debug(\"get_public_key\")\n certpath, cert, certstr = self.get_own_cert()\n try:\n cert = load_pem_x509_certificate(certstr, default_backend())\n except Exception as err:\n _log.error(\"Failed to load X509 certificate from PEM, err={}\".format(err))\n raise\n return cert.public_key()", "def solve(key_data: bytes) -> PublicKey:\n return { # type: ignore\n Encoding.PEM: load_pem_public_key,\n Encoding.DER: load_der_public_key\n }[real_encoding](key_data, default_backend())", "def test_public_key_rsa(self):\n cert = \"\"\"-----BEGIN 
CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n x509 = crypto.load_certificate(PEM, cert)\n self.assertEqual(utils.public_key_type(x509), c.KEY_RSA)", "def Read(key):\n rsa = json.loads(key)\n params = {'modulus' : util.Decode(rsa['modulus']),\n 'publicExponent' : util.Decode(rsa['publicExponent'])}\n\n pubkey = RSA.construct((util.BytesToLong(params['modulus']),\n util.BytesToLong(params['publicExponent'])))\n return RsaPublicKey(params, pubkey, rsa['size'])", "def Read(key):\n rsa = json.loads(key)\n params = {\n 'modulus': util.Base64WSDecode(rsa['modulus']),\n 'publicExponent': util.Base64WSDecode(rsa['publicExponent'])\n }\n\n pubkey = RSA.construct((util.BytesToLong(params['modulus']),\n util.BytesToLong(params['publicExponent'])))\n return RsaPublicKey(params, pubkey, rsa['size'])", "def public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|publickey'\"\n )\n\n data_dec = ctx.data\n if ctx.ref_encoding == \"base64\":\n data_dec = base64.b64decode(data_dec).decode()\n\n private_key = serialization.load_pem_private_key(\n data_dec.encode(), password=None, backend=default_backend()\n )\n public_key = private_key.public_key()\n\n ctx.data = str(\n public_key.public_bytes(\n encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo\n ),\n \"UTF-8\",\n )", "def load_private(file):\n with open(file, \"rb\") as pemfile:\n key = jwk.JWK.from_pem(pemfile.read())\n\n logging.info('Loaded private key from {}'.format(file))\n return key", "def _get_pubkey_from_pem_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_pem_public_key(filedata, backend=backend), None\n except Exception:\n return None, None", "def load_rsa_key(key, key_type, key_encoding):\n # (bytes, EncryptionKeyType, KeyEncodingType) -> Any\n # narrow down the output type\n # https://github.com/aws/aws-dynamodb-encryption-python/issues/66\n try:\n loader = _RSA_KEY_LOADING[key_type][key_encoding]\n except KeyError:\n raise ValueError(\"Invalid key type and encoding: {} and {}\".format(key_type, key_encoding))\n\n kwargs = dict(data=key, backend=default_backend())\n if key_type is EncryptionKeyType.PRIVATE:\n kwargs[\"password\"] = None\n\n loaded_key = loader(**kwargs)\n\n if loaded_key.key_size < MinimumKeySizes.RSA.value:\n _LOGGER.warning(\"RSA keys smaller than %d bits are unsafe\", MinimumKeySizes.RSA.value)\n\n return loaded_key", "def load_key(fn, psw=None):\n if not fn:\n die(\"Need private key\")\n if psw:\n psw = as_bytes(psw)\n data = load_gpg_file(fn)\n key = load_pem_private_key(data, password=psw, backend=get_backend())\n return key", "def rsa_public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: RSA public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|rsapublic'\"\n )\n\n public_key(ctx)", "def 
parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)", "def Read(key):\n\n dsa = json.loads(key)\n params = {\n 'y': util.Base64WSDecode(dsa['y']),\n 'p': util.Base64WSDecode(dsa['p']),\n 'g': util.Base64WSDecode(dsa['g']),\n 'q': util.Base64WSDecode(dsa['q'])\n }\n pubkey = DSA.construct(\n (util.BytesToLong(params['y']), util.BytesToLong(params['g']),\n util.BytesToLong(params['p']), util.BytesToLong(params['q'])))\n return DsaPublicKey(params, pubkey, dsa['size'])", "def _wrap_publickey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PublicKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize public key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_public_key(der, backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def Read(key):\n\n dsa = json.loads(key)\n params = {'y' : util.Decode(dsa['y']),\n 'p' : util.Decode(dsa['p']),\n 'g' : util.Decode(dsa['g']),\n 'q' : util.Decode(dsa['q'])}\n pubkey = DSA.construct((util.BytesToLong(params['y']),\n util.BytesToLong(params['g']),\n util.BytesToLong(params['p']),\n util.BytesToLong(params['q'])))\n return DsaPublicKey(params, pubkey, dsa['size'])", "def load_received_public_key_der(self, public_key_der):\n return self.load_received_public_key(VerifyingKey.from_der(public_key_der))", "def import_publickey_cert_pem(self, cert_pemstring, privkey_pemstring=None):\n ## TODO: This method is not tested. It may have bugs.\n if isinstance(cert_pemstring, str):\n cert_pemstring = cert_pemstring.encode('utf-8')\n cert = x509.load_pem_x509_certificate(cert_pemstring, default_backend())\n fingerprint = cert.fingerprint(hashes.SHA256())\n\n if privkey_pemstring is not None:\n self.mk_keyobj_from_private_key_pem(privkey_pemstring)\n sig = self.private_key_obj.sign(fingerprint, ec.ECDSA(utils.Prehashed(hashes.SHA256())))\n public_key = cert.public_key()\n result = public_key.verify(sig, fingerprint, ec.ECDSA(hashes.SHA256()))\n if not result:\n return False\n self.private_key_obj = public_key\n self._get_naive_private_key_bytes()\n\n self._get_naive_public_key_bytes()\n return True", "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. 
In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def loadKey (self, filename=\"pub.key\"):\n try:\n key_file = open(filename, \"r\")\n data = key_file.read()\n aux = data.split(\";\")\n self.n = int(aux[0])\n self.n_sq = int(aux[1])\n self.g = int(aux[2])\n except:\n raise Exception(\"could not load key from file: \" + filename)", "def get_public_key_in_der(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public", "def get_public_key_in_pem(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public", "def get_pub_rsa_key(pub_key):\n return RSA.importKey(pub_key)", "def load_cert(file, format=FORMAT_PEM):\n bio = BIO.openfile(file)\n if format == FORMAT_PEM:\n return load_cert_bio(bio)\n elif format == FORMAT_DER:\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)\n else:\n raise ValueError(\"Unknown format. 
Must be either FORMAT_DER or FORMAT_PEM\")", "def test_set_public_key(self) -> None:\n\n expected = self.pem_public_key.decode()\n\n encryptor = DataEncryption(public_key=self.pem_public_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_public_key.public_bytes(\n serialization.Encoding.PEM, serialization.PublicFormat.PKCS1\n ).decode()\n\n self.assertEqual(expected, actual)", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def test_get_public_key(self):\n with patch('{}.open'.format(__name__), open_priv_pub):\n with open('tests/adbkey.pub', 'rb') as f:\n pub = f.read()\n\n self.assertEqual(pub, self.signer.GetPublicKey())", "def load_received_public_key_pem(self, public_key_pem):\n return self.load_received_public_key(VerifyingKey.from_pem(public_key_pem))", "def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:\n key = ECKey(privkey)\n return key.get_public_key(compressed)", "def get_pubkey_from_file(filename: str) -> Tuple[Optional[SupportedKeyTypes], Optional[int]]:\n with open(filename, \"rb\") as fobj:\n filedata = fobj.read()\n pubkey, keyidv2 = get_pubkey(filedata)\n if pubkey:\n if not isinstance(pubkey, (RSAPublicKey, EllipticCurvePublicKey)):\n raise ValueError(f\"Unsupported key type {type(pubkey).__name__}\")\n return pubkey, keyidv2\n\n return None, None", "def _get_pubkey_from_der_x509_certificate(filedata: bytes, backend: Any) -> Tuple[Any, Optional[int]]:\n try:\n cert = x509.load_der_x509_certificate(filedata, backend=backend)\n return cert.public_key(), _get_keyidv2_from_cert(cert)\n except Exception:\n return None, None", "def get_public_key():\n\n ssh_conf_path = os.path.expanduser('~/.ssh')\n\n dsa_public_key_path = os.path.join(ssh_conf_path, 'id_dsa.pub')\n dsa_private_key_path = os.path.join(ssh_conf_path, 'id_dsa')\n\n rsa_public_key_path = os.path.join(ssh_conf_path, 'id_rsa.pub')\n rsa_private_key_path = os.path.join(ssh_conf_path, 'id_rsa')\n\n has_dsa_keypair = os.path.isfile(dsa_public_key_path) and \\\n os.path.isfile(dsa_private_key_path)\n has_rsa_keypair = os.path.isfile(rsa_public_key_path) and \\\n os.path.isfile(rsa_private_key_path)\n\n if has_dsa_keypair:\n print 'DSA keypair found, using it'\n public_key_path = dsa_public_key_path\n\n elif has_rsa_keypair:\n print 'RSA keypair found, using it'\n public_key_path = rsa_public_key_path\n\n else:\n print 'Neither RSA nor DSA keypair found, creating DSA ssh key pair'\n system('ssh-keygen -t dsa -q -N \"\" -f %s' % dsa_private_key_path)\n public_key_path = dsa_public_key_path\n\n public_key = open(public_key_path, 'r')\n public_key_str = public_key.read()\n public_key.close()\n\n return public_key_str", "def public_key(\n self,\n key: str,\n default: Any = undefined,\n description: str = None,\n key_format: Optional[EncryptionKeyFormat] = None,\n **kwargs\n ) -> Optional[PublicKey]:\n cast_key = partial(cast_public_key, key_format=key_format)\n return self._process(key, description=description, default=default, cast=cast_key, type=PublicKey, **kwargs)", "def rsa_file_to_privatekey(filename):\r\n fileobject = file(filename,'r')\r\n privatekeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_privatekey(privatekeystring)", "def load_certificate(file_path: str, encoding: Encoding = None) -> Certificate:\n real_encoding = encoding or 
_get_encoding_type(file_path)\n\n def solve(certificate_data: bytes) -> Certificate:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param certificate_data: given certificate data\n :return: loaded certificate\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_x509_certificate,\n Encoding.DER: load_der_x509_certificate\n }[real_encoding](certificate_data, default_backend())\n\n return generic_load(file_path, solve)", "def get_public_key(cert_file):\n # Use OpenSSL to extract public key\n command = 'openssl x509 -inform pem -in %s -pubkey -noout'\n command = command % cert_file\n command = shlex.split(command)\n\n pipe = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n key_ascii = pipe.stdout.read()\n\n return key_ascii", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {'privateExponent': util.Decode(rsa['privateExponent']),\n 'primeP': util.Decode(rsa['primeP']),\n 'primeQ': util.Decode(rsa['primeQ']),\n 'primeExponentP': util.Decode(rsa['primeExponentP']),\n 'primeExponentQ': util.Decode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Decode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "def load_key(self, pemfile_path_abs: str, set_priv=False) -> None:\n return None", "def load_keys(self, load_path=DEFAULT_KEY_PATH):\n try:\n with open(f'{load_path}/id_elgamal', 'r') as f:\n f.read(self.keys['private'])\n with open(f'{load_path}/id_elgamal.pub', 'r') as f:\n self.keys['public']['p'] = f.readline()\n self.keys['public']['g'] = f.readline()\n self.keys['public']['y'] = f.readline()\n debug_message('Loading successful!')\n return self.keys\n except FileNotFoundError:\n debug_message(f'Loading error! 
({FileNotFoundError})')\n return 0", "def import_public_key(self, raw_key):\n error = vscf_error_t()\n result = self._lib_vscf_ecc.vscf_ecc_import_public_key(self.ctx, raw_key.ctx, error)\n VscfStatus.handle_status(error.status)\n instance = VscfImplTag.get_type(result)[0].take_c_ctx(cast(result, POINTER(VscfImplTag.get_type(result)[1])))\n return instance", "def load_pem_x509_certificate(data):\n return _x509.load_pem_x509_certificate(data, _backends.default_backend())", "def import_private_key_from_pem_file(filename, passphrase=None):\n with open(filename, \"rb\") as key_file:\n private_key = serialization.load_pem_private_key(\n key_file.read(), password=passphrase, backend=default_backend()\n )\n return private_key", "def rsa_public_key(self, modulus: int, exponent: int) -> rsa.RSAPublicKey:\n return rsa.RSAPublicNumbers(exponent, modulus).public_key(default_backend())", "def _get_pubkey_from_pem_x509_certificate(filedata: bytes, backend: Any) -> Tuple[Any, Optional[int]]:\n try:\n cert = x509.load_pem_x509_certificate(filedata, backend=backend)\n return cert.public_key(), _get_keyidv2_from_cert(cert)\n except Exception:\n return None, None", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {\n 'privateExponent': util.Base64WSDecode(rsa['privateExponent']),\n 'primeP': util.Base64WSDecode(rsa['primeP']),\n 'primeQ': util.Base64WSDecode(rsa['primeQ']),\n 'primeExponentP': util.Base64WSDecode(rsa['primeExponentP']),\n 'primeExponentQ': util.Base64WSDecode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Base64WSDecode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "def get_google_public_cert_key() -> RSAPublicKey:\n r = requests.get(GOOGLE_PUBLIC_CERT_URL)\n r.raise_for_status()\n\n # Load the certificate.\n certificate = x509.load_pem_x509_certificate(r.content, default_backend())\n\n # Get the certicate's public key.\n public_key = certificate.public_key()\n\n return public_key", "def read_pk(filename):\n with open(filename, 'rb') as fd:\n ret = pickle.load(fd)\n return ret", "def _get_pubkey_from_pem_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_pem_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def import_key(key: str) -> RSA.RsaKey:\n\n return RSA.importKey(binascii.unhexlify(key))", "def _rsa_keydict_to_keyobj(publickey = None, privatekey = None):\r\n if publickey is not None:\r\n if not rsa_is_valid_publickey(publickey):\r\n raise ValueError, \"Invalid public key\"\r\n \r\n if privatekey is not None: \r\n if not rsa_is_valid_privatekey(privatekey):\r\n raise ValueError, \"Invalid private key\"\r\n \r\n if publickey is None and privatekey is None:\r\n raise TypeError(\"Must provide either private or public key dictionary\")\r\n\r\n if publickey is None: \r\n publickey = {}\r\n if privatekey is None: \r\n privatekey = {}\r\n \r\n n = None \r\n e = None\r\n d = None\r\n p = None\r\n q = None\r\n \r\n if 'd' in privatekey: \r\n d = long(privatekey['d'])\r\n if 'p' in privatekey: \r\n p = long(privatekey['p'])\r\n if 'q' in privatekey: \r\n q = long(privatekey['q']) \r\n \r\n if 'n' 
in publickey: \r\n n = long(publickey['n'])\r\n # n is needed for a private key even thought it is not\r\n # part of the standard public key dictionary.\r\n else: n = p*q \r\n if 'e' in publickey: \r\n e = long(publickey['e'])\r\n \r\n rsa_implementation = RSA_RSAImplementation()\r\n rsa_key = rsa_implementation.construct((n,e,d,p,q))\r\n \r\n return rsa_key", "async def retrieve_public_key(self, kid: str) -> str:\n\n directory = tedious.config.CONFIG['KEYS']['public-keys']\n async with aiofiles.open(os.path.join(directory, kid), mode='r') as file:\n public_key = await file.read()\n return public_key", "def _try_load_ca_private_key(path):\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, open(path, 'rb').read())\n if pkey.bits() < 2048:\n raise ValueError(\"I'm sorry Dave, I can't let you use a small \"\n \"RSA key.\")\n pkey.check()\n return pkey", "def load_private_key(file_path: str, password: bytes = None,\n encoding: Encoding = None) -> PrivateKey:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(key_data: bytes) -> PrivateKey:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param key_data: given private keys data\n :return: loaded private key\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())\n\n return generic_load(file_path, solve)", "def generate_rsa_public_key(private_key: RSAPrivateKeyWithSerialization) -> RSAPublicKey:\n return private_key.public_key()", "def test_public_key_req(self):\n csr = \"\"\"-----BEGIN CERTIFICATE REQUEST-----\nMIHcMIGDAgEAMCExDzANBgNVBAMMBkVDIDI1NjEOMAwGA1UECgwFV2ViQ0EwWTAT\nBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQiDp4E4+/kzbPgA22wm6RuKYpZfTiVqcR0\nAuxu7bE0IMFcnQgnhJ3e7gbWq+spfSYEd3vJQ8a6L7tu+nTziY1qoAAwCgYIKoZI\nzj0EAwIDSAAwRQIhAMRpKf1c6Z0qgTCNxyKXZGsc4i/qxfqxzcZ/QK7Ot9TeAiA7\nAPUerdBAf4HdigxiwcckjZ8TG1snkyp/qVuMhxSDEg==\n-----END CERTIFICATE REQUEST-----\"\"\"\n x509req = crypto.load_certificate_request(PEM, csr)\n self.assertEqual(utils.public_key_type(x509req), c.KEY_EC)", "def load(self):\n del self[0:len(self)]\n\n if not os.path.isfile(self.path):\n self.log.debug('No such file: {}'.format(self.path))\n return\n\n for line in [l.rstrip() for l in open(self.path, 'r').readlines()]:\n if line.startswith('#') or line.strip() == '':\n continue\n\n try:\n self.append(OpenSSHPublicKey(line))\n except SSHKeyError:\n pass", "def serializePublicKey(public_key):\n\treturn public_key.public_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PublicFormat.SubjectPublicKeyInfo\n\t)", "def deserializePublicKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_public_key(string , backend = bc)", "def load_private_key(filename):\n\twith open(str(filename) + \"_key.pem\", \"rb\") as key_file:\n\t\treturn serialization.load_pem_private_key(\n\t\tkey_file.read(),\n\t\tpassword=None,\n\t\tbackend=default_backend()\n\t)", "def load_received_public_key(self, public_key):\n if not self.curve:\n self.curve = public_key.curve\n if self.curve != public_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.public_key = public_key", "def _load_private_key(self, filename, keytype=None):\n type_map = {\n 'dsa': ssh.DSSKey,\n 'rsa': ssh.RSAKey}\n\n if keytype is None:\n with open(filename, 'rb') as k:\n keydata = k.read()\n \n m = re.search(\"BEGIN (.*?) 
PRIVATE KEY\", keydata)\n if m:\n keytype = m.group(1)\n\n keycls = type_map.get(keytype.lower(), 'dsa')\n\n try:\n key = keycls.from_private_key_file(filename)\n log.debug(\"Loaded key '%s' without password.\", filename)\n except ssh.PasswordRequiredException:\n passphrase = self.config.get('passphrase')\n \n if callable(passphrase):\n passphrase = passphrase(filename,\n self.config.get('remote_host', 'localhost'),\n self.config.get('username', getpass.getuser()))\n if passphrase is None:\n return\n\n if not passphrase:\n passphrase = getpass.getpass(\"Key passphrase: \")\n \n key = keycls.from_private_key_file(filename, passphrase)\n\n return key", "def store_public_key(public_key,filename):\n\twith open(str(filename) +'_pub_key.pem','wb') as fin:\n\t\tpem = public_key.public_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PublicFormat.SubjectPublicKeyInfo\n\t\t)\n\t\tfin.write(pem)", "def load_x509_cert(url, httpc, spec2key, **get_args):\n try:\n r = httpc(\"GET\", url, allow_redirects=True, **get_args)\n if r.status_code == 200:\n cert = str(r.text)\n try:\n public_key = spec2key[cert] # If I've already seen it\n except KeyError:\n public_key = import_public_key_from_pem_data(cert)\n spec2key[cert] = public_key\n\n if isinstance(public_key, rsa.RSAPublicKey):\n return {\"rsa\": public_key}\n elif isinstance(public_key, ec.EllipticCurvePublicKey):\n return {\"ec\": public_key}\n else:\n raise Exception(\"HTTP Get error: %s\" % r.status_code)\n except Exception as err: # not a RSA key\n logger.warning(\"Can't load key: %s\" % err)\n return []", "def get_pub_key(priv_key: rsa.RSAPrivateKey) -> rsa.RSAPublicKey:\n return priv_key.public_key()", "def loadKey(self, filename=\"priv.key\"):\n try:\n key_file = open(filename, \"r\")\n data = key_file.read()\n aux = data.split(\";\")\n self.lamb = int(aux[0])\n self.mu = int(aux[1])\n except:\n raise Exception(\"could not load key from file: \" + filename)", "def Read(key):\n dsa = json.loads(key)\n pub = DsaPublicKey.Read(json.dumps(dsa['publicKey']))\n params = { 'x' : util.Decode(dsa['x']) }\n key = DSA.construct((util.BytesToLong(pub._params['y']),\n util.BytesToLong(pub._params['g']),\n util.BytesToLong(pub._params['p']),\n util.BytesToLong(pub._params['q']),\n util.BytesToLong(params['x'])))\n return DsaPrivateKey(params, pub, key, dsa['size'])", "def __init__(self, pubkey, e=65537):\n if isinstance(pubkey, int):\n self.key = RSA.RsaKey(n=pubkey, e=e)\n\n else:\n if not isinstance(pubkey, str):\n raise ValueError('pubkey must be str or int.')\n\n if '----' in pubkey:\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)\n else:\n if pubkey == pubkey.lower():\n pubkey = int(pubkey, 16)\n self.key = RSA.RsaKey(n=pubkey, e=e)\n else:\n pubkey = '-----BEGIN PUBLIC KEY-----\\n' + pubkey + '\\n-----END PUBLIC KEY-----'\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)", "def test_set_public_key_setter(self) -> None:\n\n expected = self.pem_public_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_public_key(self.pem_public_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_public_key.public_bytes(\n serialization.Encoding.PEM, serialization.PublicFormat.PKCS1\n ).decode()\n\n self.assertEqual(expected, actual)", "def rsa_string_to_publickey(mystr):\r\n if len(mystr.split()) != 2:\r\n raise ValueError, \"Invalid public key string\"\r\n \r\n return {'e':long(mystr.split()[0]), 'n':long(mystr.split()[1])}", "def __init__(self, 
public_key):\n self._pk = ed25519.Ed25519PublicKey.from_public_bytes(public_key.bytes)", "def convert_public_key_to_ecdsa(self, public_key):\n return PublicKey.fromPem('\\n-----BEGIN PUBLIC KEY-----\\n'+public_key+'\\n-----END PUBLIC KEY-----\\n')", "def rsa_public_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PublicKeyInfo())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid public key encoding.\")\n\n return decoded_key[\"publicKey\"].asOctets()", "def Read(key):\n dsa = json.loads(key)\n pub = DsaPublicKey.Read(json.dumps(dsa['publicKey']))\n params = {'x': util.Base64WSDecode(dsa['x'])}\n key = DSA.construct((util.BytesToLong(pub._params['y']),\n util.BytesToLong(pub._params['g']),\n util.BytesToLong(pub._params['p']),\n util.BytesToLong(pub._params['q']),\n util.BytesToLong(params['x'])))\n return DsaPrivateKey(params, pub, key, dsa['size'])", "def rsa_is_valid_publickey(key):\r\n # must be a dict\r\n if type(key) is not dict:\r\n return False\r\n\r\n # missing the right keys\r\n if 'e' not in key or 'n' not in key:\r\n return False\r\n\r\n # has extra data in the key\r\n if len(key) != 2:\r\n return False\r\n\r\n for item in ['e', 'n']:\r\n # must have integer or long types for the key components...\r\n if type(key[item]) is not int and type(key[item]) is not long:\r\n return False\r\n\r\n if key['e'] < key['n']:\r\n # Seems valid...\r\n return True\r\n else:\r\n return False", "def test_get_public_key(self) -> None:\n\n expected = self.pem_public_key\n\n encryptor = DataEncryption()\n encryptor.set_public_key(self.pem_public_key.decode())\n\n actual = encryptor.get_public_key()\n\n self.assertEqual(expected, actual)", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def get_pubkey(filedata: bytes) -> Tuple[Optional[SupportedKeyTypes], Optional[int]]:\n default_be = backends.default_backend()\n for func in [\n _get_pubkey_from_der_x509_certificate,\n _get_pubkey_from_pem_x509_certificate,\n _get_pubkey_from_der_public_key,\n _get_pubkey_from_pem_public_key,\n _get_pubkey_from_der_private_key,\n _get_pubkey_from_pem_private_key,\n ]:\n pubkey, keyidv2 = func(filedata, default_be)\n if pubkey:\n if not isinstance(pubkey, (RSAPublicKey, EllipticCurvePublicKey)):\n raise ValueError(f\"Unsupported key type {type(pubkey).__name__}\")\n return pubkey, keyidv2\n\n return None, None", "def test_create_keypair_from_file(self):\n keys = RSA.generate(1024)\n nova_utils.save_keys_to_files(keys=keys, pub_file_path=pub_file_path)\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)", "def _load_key(self, path):\n with open(path, 'r') as f:\n self._key = f.readline().strip()\n self._secret = f.readline().strip()", "def public_key(self):\n return PublicKey(self._sk.public_key().public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw))", "def test_set_public_key_setter_pem_str(self) -> None:\n\n expected = self.pem_public_key.decode()\n\n 
encryptor = DataEncryption()\n encryptor.set_public_key(self.pem_public_key.decode())\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_public_key.public_bytes(\n serialization.Encoding.PEM, serialization.PublicFormat.PKCS1\n ).decode()\n\n self.assertEqual(expected, actual)", "def read_private_key_file(pkey_file,\n pkey_password=None,\n key_type=None,\n logger=None):\n ssh_pkey = None\n key_types = (paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey)\n if hasattr(paramiko, 'Ed25519Key'):\n # NOQA: new in paramiko>=2.2: http://docs.paramiko.org/en/stable/api/keys.html#module-paramiko.ed25519key\n key_types += (paramiko.Ed25519Key, )\n for pkey_class in (key_type,) if key_type else key_types:\n try:\n ssh_pkey = pkey_class.from_private_key_file(\n pkey_file,\n password=pkey_password\n )\n if logger:\n logger.debug('Private key file ({0}, {1}) successfully '\n 'loaded'.format(pkey_file, pkey_class))\n break\n except paramiko.PasswordRequiredException:\n if logger:\n logger.error('Password is required for key {0}'\n .format(pkey_file))\n break\n except paramiko.SSHException:\n if logger:\n logger.debug('Private key file ({0}) could not be loaded '\n 'as type {1} or bad password'\n .format(pkey_file, pkey_class))\n return ssh_pkey", "def get_pubkey(pem):\n der = ssl.PEM_cert_to_DER_cert(pem)\n\n # Extract subjectPublicKeyInfo field from X.509 certificate (see RFC3280)\n cert = DerSequence()\n cert.decode(der)\n tbsCertificate = DerSequence()\n tbsCertificate.decode(cert[0])\n subjectPublicKeyInfo = tbsCertificate[6]\n\n return subjectPublicKeyInfo", "def import_key_pair(self, key_name, public_key_material):\r\n public_key_material = base64.b64encode(public_key_material)\r\n params = {'KeyName' : key_name,\r\n 'PublicKeyMaterial' : public_key_material}\r\n return self.get_object('ImportKeyPair', params, KeyPair, verb='POST')", "def get_key_pair_from_pvk_pem_file(fpath: str) -> typing.Tuple[bytes, bytes]:\n pvk = _get_bytes_from_pem_file(fpath).decode(\"UTF-8\")\n sk = ecdsa.SigningKey.from_pem(pvk)\n\n return _get_key_pair_from_sk(sk)", "def load_private_key_der(self, private_key_der):\n return self.load_private_key(SigningKey.from_der(private_key_der))", "def test_create_and_import_encrypted_rsa(self):\n name = \"key_encrypted\"\n password = \"123456\"\n bits= 3072\n generate_and_write_rsa_keypair(name, bits, password)\n private_key = import_rsa_key_from_file(name, password)\n public_key = import_rsa_key_from_file(name + \".pub\")\n\n securesystemslib.formats.KEY_SCHEMA.check_match(private_key)\n self.assertTrue(private_key[\"keyval\"].get(\"private\"))\n self.assertTrue(\n securesystemslib.formats.PUBLIC_KEY_SCHEMA.matches(public_key))", "def load_received_public_key_bytes(self, public_key_str):\n return self.load_received_public_key(\n VerifyingKey.from_string(public_key_str, self.curve))", "def get_pub_key_bytes(priv_key: rsa.RSAPrivateKey) -> bytes:\n k = priv_key.public_key()\n return k.public_bytes(encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo)", "def import_key(\n key_name, public_key_material, region=None, key=None, keyid=None, profile=None\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n key = conn.import_key_pair(key_name, public_key_material)\n log.debug(\"the key to return is : %s\", key)\n return key.fingerprint\n except boto.exception.BotoServerError as e:\n log.debug(e)\n return False", "def _createPublicKey(key):\r\n if not isinstance(key, RSAKey):\r\n raise 
AssertionError()\r\n return _createPublicRSAKey(key.n, key.e)" ]
[ "0.7472575", "0.72257024", "0.71392846", "0.68894285", "0.67382544", "0.6495051", "0.6290962", "0.62046856", "0.61463255", "0.6132795", "0.6115434", "0.6094741", "0.608693", "0.6077499", "0.6075479", "0.59709907", "0.5938014", "0.5919355", "0.59184045", "0.5892657", "0.5859119", "0.58483803", "0.58438087", "0.5841711", "0.58202857", "0.58076364", "0.57849914", "0.57528645", "0.57430816", "0.57232267", "0.5715683", "0.5650747", "0.5650475", "0.5646658", "0.56343335", "0.56341547", "0.55886537", "0.55684984", "0.55326784", "0.5531519", "0.5531031", "0.5517391", "0.5501543", "0.54963976", "0.54902714", "0.5486714", "0.54860973", "0.54691905", "0.54647017", "0.54532367", "0.5451414", "0.5436021", "0.5435557", "0.54352844", "0.540859", "0.53953224", "0.5382272", "0.53723735", "0.5371216", "0.5364644", "0.5363218", "0.53522193", "0.5352024", "0.53493094", "0.5342444", "0.5323794", "0.5323277", "0.5284804", "0.5265346", "0.52649844", "0.5263025", "0.5239281", "0.52382094", "0.5222319", "0.52178705", "0.5210794", "0.5208141", "0.52053195", "0.52034354", "0.51950353", "0.5174065", "0.51717", "0.51676476", "0.5149435", "0.5144351", "0.5143002", "0.5129361", "0.5117833", "0.5107104", "0.5105768", "0.50926423", "0.50658906", "0.50498307", "0.5046995", "0.5044493", "0.5034463", "0.50262463", "0.50207776", "0.5014084", "0.5012601" ]
0.6618405
5
Loads a certificate from a DER or PEM-formatted file. Supports X.509 certificates only.
def parse_certificate(data):
    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))

    key_type = None

    # Appears to be PEM formatted
    if re.match(b'\\s*-----', data) is not None:
        key_type, _, data = _unarmor_pem(data)

        if key_type == 'private key':
            raise ValueError(pretty_message(
                '''
                The data specified does not appear to be a certificate, but
                rather a private key
                '''
            ))

        if key_type == 'public key':
            raise ValueError(pretty_message(
                '''
                The data specified does not appear to be a certificate, but
                rather a public key
                '''
            ))

    if key_type is None or key_type == 'certificate':
        try:
            return Certificate.load(data)
        except (ValueError):
            pass  # Data was not a Certificate

    raise ValueError(pretty_message(
        '''
        The data specified does not appear to be a known certificate format
        '''
    ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_cert(file, format=FORMAT_PEM):\n bio = BIO.openfile(file)\n if format == FORMAT_PEM:\n return load_cert_bio(bio)\n elif format == FORMAT_DER:\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)\n else:\n raise ValueError(\"Unknown format. Must be either FORMAT_DER or FORMAT_PEM\")", "def load_certificate(file_path: str, encoding: Encoding = None) -> Certificate:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(certificate_data: bytes) -> Certificate:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param certificate_data: given certificate data\n :return: loaded certificate\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_x509_certificate,\n Encoding.DER: load_der_x509_certificate\n }[real_encoding](certificate_data, default_backend())\n\n return generic_load(file_path, solve)", "def load_pem_x509_certificate(data):\n return _x509.load_pem_x509_certificate(data, _backends.default_backend())", "def load_cert(file):\n with open(file, \"r\") as pemfile:\n cert_content = pemfile.read()\n cert_stripped = \"\".join(\n [line for line in cert_content.splitlines() if \"CERTIFICATE\" not in line])\n\n logging.info('Loaded certificate from {}'.format(file))\n return cert_stripped", "def load_certificates_bytes_from_file(certificates_file_path: str) -> bytes:\n\n try:\n with open(certificates_file_path, 'rb') as certs_file:\n return certs_file.read()\n except FileNotFoundError:\n raise X509CertificateError(\n 'Certificates file not found: {}'.format(certificates_file_path)\n )\n except Exception as err:\n raise X509CertificateError(\n 'Certificates file could not be read: {}'.format(str(err))\n )", "def load_cert_chain(self, certfile, keyfile: Optional[Any] = ...):\n ...", "def load(cls, cert_path: Union[Path, str], key_path: Union[Path, str]) -> \"CertificateAuthority\":\n cert_path, key_path = Path(cert_path), Path(key_path)\n\n with cert_path.open(\"rb\") as file:\n cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, file.read())\n\n with key_path.open(\"rb\") as file:\n key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, file.read())\n\n return cls(key, cert)", "def load_cert_bio(bio, format=FORMAT_PEM):\n if format == FORMAT_PEM:\n cptr = m2.x509_read_pem(bio._ptr())\n elif format == FORMAT_DER:\n cptr = m2.d2i_x509(bio._ptr())\n else:\n raise ValueError(\"Unknown format. 
Must be either FORMAT_DER or FORMAT_PEM\")\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)", "def _try_load_ca_cert(path):\n crt = crypto.load_certificate(crypto.FILETYPE_PEM,\n open(path, 'rb').read())\n if crt.has_expired():\n raise ValueError('CA certificate has expired.')\n if crt.get_signature_algorithm() in ('md5', 'sha1'):\n raise ValueError('CA certificate signed with MD5 or SHA1.')\n return crt", "def load_cert_der_string(string):\n bio = BIO.MemoryBuffer(string)\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)", "def solve(certificate_data: bytes) -> Certificate:\n return { # type: ignore\n Encoding.PEM: load_pem_x509_certificate,\n Encoding.DER: load_der_x509_certificate\n }[real_encoding](certificate_data, default_backend())", "def load_key_and_cert(key_file, cert_file):\n with open(cert_file, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n with open(key_file, 'rb') as f:\n key = serialization.load_pem_private_key(f.read(), None, backend=default_backend())\n\n return key, cert", "def load_cert_string(string, format=FORMAT_PEM):\n bio = BIO.MemoryBuffer(string)\n return load_cert_bio(bio, format)", "async def import_certificate(\n self, certificate_name: str, certificate_bytes: bytes, **kwargs\n ) -> KeyVaultCertificate:\n\n enabled = kwargs.pop(\"enabled\", None)\n policy = kwargs.pop(\"policy\", None)\n\n if enabled is not None:\n attributes = self._models.CertificateAttributes(enabled=enabled)\n else:\n attributes = None\n base64_encoded_certificate = base64.b64encode(certificate_bytes).decode(\"utf-8\")\n\n parameters = self._models.CertificateImportParameters(\n base64_encoded_certificate=base64_encoded_certificate,\n password=kwargs.pop(\"password\", None),\n certificate_policy=policy._to_certificate_policy_bundle() if policy else None,\n certificate_attributes=attributes,\n tags=kwargs.pop(\"tags\", None),\n )\n\n bundle = await self._client.import_certificate(\n vault_base_url=self.vault_url,\n certificate_name=certificate_name,\n parameters=parameters,\n **kwargs\n )\n return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)", "def load_crl(file):\n f=BIO.openfile(file)\n cptr= m2.x509_crl_read_pem(f.bio_ptr())\n f.close()\n if cptr is None:\n raise X509Error(Err.get_error())\n return CRL(cptr, 1)", "def get_certificate_from_file(file_path):\n LOG.debug(\"extracting information of certificate in %s\" % file_path)\n try:\n with open(file_path, 'rb') as file_data:\n file_data.seek(0, os.SEEK_SET)\n read_file = file_data.read()\n certificate = extract_certs_from_pem(read_file)[0]\n except Exception as e:\n LOG.warning(\"No certificate was extracted from file %s\"\n \"due to %s\" % (file_path, e))\n return None\n return certificate", "def load_x509_cert(url, httpc, spec2key, **get_args):\n try:\n r = httpc(\"GET\", url, allow_redirects=True, **get_args)\n if r.status_code == 200:\n cert = str(r.text)\n try:\n public_key = spec2key[cert] # If I've already seen it\n except KeyError:\n public_key = import_public_key_from_pem_data(cert)\n spec2key[cert] = public_key\n\n if isinstance(public_key, rsa.RSAPublicKey):\n return {\"rsa\": public_key}\n elif isinstance(public_key, ec.EllipticCurvePublicKey):\n return {\"ec\": public_key}\n else:\n raise Exception(\"HTTP Get error: %s\" % r.status_code)\n except Exception as err: # not a RSA key\n logger.warning(\"Can't load key: %s\" % err)\n return []", "def 
der_cert(der_data):\n if isinstance(der_data, str):\n der_data = bytes(der_data, \"utf-8\")\n return x509.load_der_x509_certificate(der_data, default_backend())", "def _try_load_ca_private_key(path):\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, open(path, 'rb').read())\n if pkey.bits() < 2048:\n raise ValueError(\"I'm sorry Dave, I can't let you use a small \"\n \"RSA key.\")\n pkey.check()\n return pkey", "def read_cert_bundle(ca_bundle_file, storage=None):\n if storage is None:\n storage = ROOT_CERTIFICATES_DICT\n logger = getLogger(__name__)\n logger.debug('reading certificate bundle: %s', ca_bundle_file)\n # cabundle file encoding varies. Tries reading it in utf-8 but ignore\n # all errors\n all_certs = codecs.open(\n ca_bundle_file, 'r', encoding='utf-8', errors='ignore').read()\n state = 0\n contents = []\n for line in all_certs.split('\\n'):\n if state == 0 and line.startswith('-----BEGIN CERTIFICATE-----'):\n state = 1\n contents.append(line)\n elif state == 1:\n contents.append(line)\n if line.startswith('-----END CERTIFICATE-----'):\n cert = load_certificate(\n FILETYPE_PEM,\n '\\n'.join(contents).encode('utf-8'))\n storage[cert.get_subject().der()] = cert\n state = 0\n contents = []", "def import_public_key_from_cert_file(filename):\n with open(filename, \"rb\") as key_file:\n cert = x509.load_pem_x509_certificate(key_file.read(), backend=default_backend())\n return cert.public_key()", "def load_private_key(filename):\n\twith open(str(filename) + \"_key.pem\", \"rb\") as key_file:\n\t\treturn serialization.load_pem_private_key(\n\t\tkey_file.read(),\n\t\tpassword=None,\n\t\tbackend=default_backend()\n\t)", "def import_private_key_from_pem_file(filename, passphrase=None):\n with open(filename, \"rb\") as key_file:\n private_key = serialization.load_pem_private_key(\n key_file.read(), password=passphrase, backend=default_backend()\n )\n return private_key", "def load_private(file):\n with open(file, \"rb\") as pemfile:\n key = jwk.JWK.from_pem(pemfile.read())\n\n logging.info('Loaded private key from {}'.format(file))\n return key", "def load_private_key(file_path: str, password: bytes = None,\n encoding: Encoding = None) -> PrivateKey:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(key_data: bytes) -> PrivateKey:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param key_data: given private keys data\n :return: loaded private key\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())\n\n return generic_load(file_path, solve)", "def get_certificate(self, path: Union[bytes, str]) -> str:\n path = _to_bytes_or_null(path)\n certificate = ffi.new(\"char **\")\n ret = lib.Fapi_GetCertificate(self._ctx, path, certificate)\n _chkrc(ret)\n # certificate is guaranteed to be a null-terminated string\n return ffi.string(_get_dptr(certificate, lib.Fapi_Free)).decode()", "def load_request(file, format=FORMAT_PEM):\n f=BIO.openfile(file)\n if format == FORMAT_PEM:\n cptr= m2.x509_req_read_pem(f.bio_ptr())\n elif format == FORMAT_DER:\n cptr = m2.d2i_x509_req(f.bio_ptr())\n else:\n raise ValueError(\"Unknown filetype. 
Must be either FORMAT_PEM or FORMAT_DER\")\n f.close()\n if cptr is None:\n raise X509Error(Err.get_error())\n return Request(cptr, 1)", "def from_file(path, encoding='pem'):\n try:\n with open(path, 'r') as f:\n return X509Csr.from_open_file(f, encoding)\n except IOError:\n raise X509CsrError(\"Could not read file %s\" % path)", "def from_bytes(cls, bytes):\n construct = _constructs.Certificate.parse(bytes)\n return cls(\n certificate_list=[\n ASN1Cert(\n asn1_cert=asn1cert.asn1_cert\n )\n for asn1cert in construct.certificate_list],\n )", "def _load_verify_cafile(self, cafile):\n with open(cafile, \"w\") as fObj:\n fObj.write(root_cert_pem.decode(\"ascii\"))\n\n self._load_verify_locations_test(cafile)", "def fetch_cert(source, entry, s3_client):\n if source == \"s3\":\n bucket_and_key = parse_s3_url(entry)\n logger.info(\"...reading s3 source = {}\".format(bucket_and_key))\n pem_cert = s3_client.get_object(\n Bucket=bucket_and_key[\"bucket\"], Key=bucket_and_key[\"key\"]\n )\n pem_cert_body = pem_cert[\"Body\"].read()\n elif source == \"memory\":\n logger.info(\"...reading from memory\")\n pem_cert_body = entry\n else:\n raise ValueError(\n \"Invalid cert entry type {}, \" \"must be one of s3, memory\".format(source)\n )\n\n # Python3 will return a byte string, Python2 will return a string\n if type(pem_cert_body) == bytes:\n pem_cert_body = pem_cert_body.decode(\"utf-8\")\n\n return pem_cert_body", "def get_certificate(self, cert_id):\r\n return self.ssl.getObject(id=cert_id)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n certificate: Optional[pulumi.Input[str]] = None,\n certificate_id: Optional[pulumi.Input[str]] = None,\n certificate_name: Optional[pulumi.Input[str]] = None,\n domain: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n private_key: Optional[pulumi.Input[str]] = None) -> 'Certificate':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _CertificateState.__new__(_CertificateState)\n\n __props__.__dict__[\"certificate\"] = certificate\n __props__.__dict__[\"certificate_id\"] = certificate_id\n __props__.__dict__[\"certificate_name\"] = certificate_name\n __props__.__dict__[\"domain\"] = domain\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"private_key\"] = private_key\n return Certificate(resource_name, opts=opts, __props__=__props__)", "def __init__(self, cert_string=None, cert_file=None, key_string=None, key_file=None, passphrase=None):\n self._context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)\n\n if cert_file:\n # we have to load certificate for equality check. there is no\n # other way to obtain certificate from context.\n with open(cert_file, 'rb') as fp:\n cert_string = fp.read()\n\n cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_string)\n self._context.use_certificate(cert)\n\n if not key_string and not key_file:\n # OpenSSL is smart enought to locate private key in certificate\n args = [OpenSSL.crypto.FILETYPE_PEM, cert_string]\n if passphrase is not None:\n args.append(passphrase)\n\n pk = OpenSSL.crypto.load_privatekey(*args)\n self._context.use_privatekey(pk)\n elif key_file and not passphrase:\n self._context.use_privatekey_file(key_file, OpenSSL.crypto.FILETYPE_PEM)\n\n else:\n if key_file:\n # key file is provided with passphrase. 
context.use_privatekey_file\n # does not use passphrase, so we have to load the key file manually.\n with open(key_file, 'rb') as fp:\n key_string = fp.read()\n\n args = [OpenSSL.crypto.FILETYPE_PEM, key_string]\n if passphrase is not None:\n args.append(passphrase)\n\n pk = OpenSSL.crypto.load_privatekey(*args)\n self._context.use_privatekey(pk)\n\n # check if we are not passed some garbage\n self._context.check_privatekey()\n\n # used to compare certificates.\n self._equality = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)", "def test_load_client_ca(self, context, ca_file):\n context.load_client_ca(ca_file)", "def cert_file(self):\n return self._get('cert_file')", "def _use_certificate_file_test(self, certificate_file):\n # TODO\n # Hard to assert anything. But we could set a privatekey then ask\n # OpenSSL if the cert and key agree using check_privatekey. Then as\n # long as check_privatekey works right we're good...\n with open(certificate_file, \"wb\") as pem_file:\n pem_file.write(root_cert_pem)\n\n ctx = Context(SSLv23_METHOD)\n ctx.use_certificate_file(certificate_file)", "def get_cert_content(certificate):\n cert_object = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)\n cert_content = crypto.dump_certificate(crypto.FILETYPE_TEXT, cert_object)\n return cert_content", "def parse(self, s):\r\n\r\n bytes = dePem(s, \"CERTIFICATE\")\r\n self.parseBinary(bytes)\r\n return self", "def import_public_key_from_pem_file(filename):\n with open(filename, \"rb\") as key_file:\n public_key = serialization.load_pem_public_key(key_file.read(), backend=default_backend())\n return public_key", "def _load_ssl_certificate(self) -> ssl.SSLContext:\n\n sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n sslcontext.load_cert_chain(\n path.join(path.dirname(__file__), '..', '..', 'player.crt'),\n path.join(path.dirname(__file__), '..', '..', 'player.key')\n )\n\n return sslcontext", "def load_public_key(filename):\n\twith open(str(filename) + \"_pub_key.pem\", \"rb\") as key_file:\n\t\treturn serialization.load_pem_public_key(\n\t\tkey_file.read(),\n\t\tbackend=default_backend()\n\t)", "async def get_certificate(self, certificate_name: str, **kwargs) -> KeyVaultCertificate:\n bundle = await self._client.get_certificate(\n vault_base_url=self.vault_url,\n certificate_name=certificate_name,\n certificate_version=\"\",\n **kwargs\n )\n return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)", "def _parse_certificate(cls, response):\n links = _parse_header_links(response)\n try:\n cert_chain_uri = links[u'up'][u'url']\n except KeyError:\n cert_chain_uri = None\n return (\n response.content()\n .addCallback(\n lambda body: messages.CertificateResource(\n uri=cls._maybe_location(response),\n cert_chain_uri=cert_chain_uri,\n body=body))\n )", "def get_x509_certificate_by_name(certs, key_name):\n for cert in certs['certificates']:\n if cert['key_name'] == key_name:\n return cert['x509_certificate_pem']\n raise CertificateError('Certificate \\'%s\\' not found' % key_name)", "def test_load_client_ca_invalid(self, context, tmpdir):\n ca_file = tmpdir.join(\"test.pem\")\n ca_file.write(\"\")\n\n with pytest.raises(Error) as e:\n context.load_client_ca(str(ca_file).encode(\"ascii\"))\n\n assert \"PEM routines\" == e.value.args[0][0][0]", "def load(filename):\n f = codecs.open(filename, encoding='utf-8')\n try:\n result = f.read()\n finally:\n f.close()\n if not encodes_as_ascii(result):\n # see https://bitbucket.org/kang/python-keyring-lib/issue/55\n raise 
ValueError(\"distutils requires ASCII\")\n return result", "def pem_armor_certificate(certificate):\n\n return asymmetric.dump_certificate(certificate)", "def dcos_ca_bundle():\n resp = sdk_cmd.cluster_request('GET', '/ca/dcos-ca.crt')\n cert = resp.content.decode('ascii')\n assert cert is not None\n return cert", "def parse_der_certificates(der_bytes: bytes) -> List[Certificate]:\n\n result = []\n try:\n leaf = x509.load_der_x509_certificate(der_bytes, default_backend())\n result.append(leaf)\n _, remaining_data = decode(der_bytes)\n while len(remaining_data) > 0:\n cert = x509.load_der_x509_certificate(remaining_data, default_backend())\n result.append(cert)\n _, remaining_data = decode(remaining_data)\n except Exception:\n raise X509CertificateError('Unable to parse DER X.509 certificate')\n\n return result", "def load_key(self, pemfile_path_abs: str, set_priv=False) -> None:\n return None", "def load_from_existing(self, obj):\n self.subject = self.extract_name(obj.subject)\n\n for ext in obj.extensions:\n crit = ext.critical\n extobj = ext.value\n if ext.oid == ExtensionOID.BASIC_CONSTRAINTS:\n if not crit:\n raise InvalidCertificate(\"BASIC_CONSTRAINTS must be critical\")\n self.ca = extobj.ca\n self.path_length = None\n if self.ca:\n self.path_length = extobj.path_length\n elif ext.oid == ExtensionOID.KEY_USAGE:\n if not crit:\n raise InvalidCertificate(\"KEY_USAGE must be critical\")\n self.usage += self.extract_key_usage(extobj)\n elif ext.oid == ExtensionOID.SUBJECT_ALTERNATIVE_NAME:\n self.san = self.extract_gnames(extobj)\n elif ext.oid == ExtensionOID.EXTENDED_KEY_USAGE:\n self.usage += self.extract_xkey_usage(extobj)\n elif ext.oid == ExtensionOID.AUTHORITY_INFORMATION_ACCESS:\n for ad in extobj:\n if not isinstance(ad.access_location, x509.UniformResourceIdentifier):\n InvalidCertificate(\"Unsupported access_location: %s\" % (ad.access_location,))\n url = as_unicode(ad.access_location.value)\n\n if ad.access_method == AuthorityInformationAccessOID.CA_ISSUERS:\n self.issuer_urls.append(url)\n elif ad.access_method == AuthorityInformationAccessOID.OCSP:\n self.ocsp_urls.append(url)\n else:\n raise InvalidCertificate(\"Unsupported access_method: %s\" % (ad.access_method,))\n elif ext.oid == ExtensionOID.CRL_DISTRIBUTION_POINTS:\n for dp in extobj:\n if dp.relative_name:\n raise InvalidCertificate(\"DistributionPoint.relative_name not supported\")\n if dp.crl_issuer:\n raise InvalidCertificate(\"DistributionPoint.crl_issuer not supported\")\n if dp.reasons:\n raise InvalidCertificate(\"DistributionPoint.reasons not supported\")\n\n for gn in self.extract_gnames(dp.full_name):\n if gn.startswith('uri:'):\n self.crl_urls.append(gn[4:])\n else:\n raise InvalidCertificate(\"Unsupported DistributionPoint: %s\" % (gn,))\n elif ext.oid == ExtensionOID.NAME_CONSTRAINTS:\n self.permit_subtrees = self.extract_gnames(extobj.permitted_subtrees)\n self.exclude_subtrees = self.extract_gnames(extobj.excluded_subtrees)\n elif ext.oid == ExtensionOID.SUBJECT_KEY_IDENTIFIER:\n pass\n elif ext.oid == ExtensionOID.AUTHORITY_KEY_IDENTIFIER:\n pass\n elif ext.oid == ExtensionOID.OCSP_NO_CHECK:\n self.ocsp_nocheck = True\n elif ext.oid == ExtensionOID.TLS_FEATURE:\n for tls_feature_code in extobj:\n if tls_feature_code == x509.TLSFeatureType.status_request:\n self.ocsp_must_staple = True\n elif tls_feature_code == x509.TLSFeatureType.status_request_v2:\n self.ocsp_must_staple_v2 = True\n else:\n raise InvalidCertificate(\"Unsupported TLSFeature: %r\" % (tls_feature_code,))\n else:\n raise 
InvalidCertificate(\"Unsupported extension in CSR: %s\" % (ext,))", "def load_private_key_der(self, private_key_der):\n return self.load_private_key(SigningKey.from_der(private_key_der))", "def handle_pem_extension(oid, _input):\r\n try:\r\n cert = objects.X509(oid)\r\n cert.pem = _input.read()\r\n except (ValueError, TypeError, OSError) as failed_to_init:\r\n raise click.BadParameter(\r\n '[{0}]: File Content can\\'t be parsed or written.\\n {1}'.format(_input.name, _input.read())\r\n ) from failed_to_init", "def import_publickey_cert_pem(self, cert_pemstring, privkey_pemstring=None):\n ## TODO: This method is not tested. It may have bugs.\n if isinstance(cert_pemstring, str):\n cert_pemstring = cert_pemstring.encode('utf-8')\n cert = x509.load_pem_x509_certificate(cert_pemstring, default_backend())\n fingerprint = cert.fingerprint(hashes.SHA256())\n\n if privkey_pemstring is not None:\n self.mk_keyobj_from_private_key_pem(privkey_pemstring)\n sig = self.private_key_obj.sign(fingerprint, ec.ECDSA(utils.Prehashed(hashes.SHA256())))\n public_key = cert.public_key()\n result = public_key.verify(sig, fingerprint, ec.ECDSA(hashes.SHA256()))\n if not result:\n return False\n self.private_key_obj = public_key\n self._get_naive_private_key_bytes()\n\n self._get_naive_public_key_bytes()\n return True", "def from_binary(self, d):\n p = MsgEcdsaCertificate._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))", "def test_use_certificate(self, ctx_or_conn):\n # TODO\n # Hard to assert anything. But we could set a privatekey then ask\n # OpenSSL if the cert and key agree using check_privatekey. Then as\n # long as check_privatekey works right we're good...\n ctx_or_conn.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n certificate: Optional[pulumi.Input[str]] = None,\n certificate_id: Optional[pulumi.Input[int]] = None,\n creation_timestamp: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n expire_time: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n private_key: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None) -> 'SSLCertificate':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SSLCertificateState.__new__(_SSLCertificateState)\n\n __props__.__dict__[\"certificate\"] = certificate\n __props__.__dict__[\"certificate_id\"] = certificate_id\n __props__.__dict__[\"creation_timestamp\"] = creation_timestamp\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"expire_time\"] = expire_time\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"name_prefix\"] = name_prefix\n __props__.__dict__[\"private_key\"] = private_key\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"self_link\"] = self_link\n return SSLCertificate(resource_name, opts=opts, __props__=__props__)", "def load(self, filename=None):\n if filename is None:\n filename = BlockChainConf.DEFAULT_CHAIN_DUMP_FILENAME\n\n with open(filename, \"r\") as chain:\n chain = loads(chain)\n\n # verify the integrity of the chain\n # before simply assuming that it is\n # a valid one", "def _load_ssl(self, ssl_options: tuple):\n try:\n self._ssl.load_cert_chain(certfile=ssl_options[0], keyfile=ssl_options[1], 
password=ssl_options[2])\n except IOError as e:\n self.logger.error(\"Unable to load certificate files: {}\".format(e))\n self.stop()", "def _get_pubkey_from_der_x509_certificate(filedata: bytes, backend: Any) -> Tuple[Any, Optional[int]]:\n try:\n cert = x509.load_der_x509_certificate(filedata, backend=backend)\n return cert.public_key(), _get_keyidv2_from_cert(cert)\n except Exception:\n return None, None", "def load_Fernet_key(filename):\n\tfich = open(str(filename) +'.key', 'rb')\n\tkey = fich.read() # The key will be type bytes\n\tfich.close()\n\treturn key", "def load(self, filepath):\n cypher_text = b''\n with open(filepath, 'rb') as f:\n header = f.read(16)\n cypher_text = f.read()\n data = self._decrypt(cypher_text)\n decrypted_header = data[:16]\n self._validate_header(header, decrypted_header)\n plain_text = str(data[16:], 'utf-8')\n cds = json.loads(plain_text, cls=ClientDataDecoder)\n if cds is None:\n cds = []\n return cds", "def _check_certificate(public_cert_content, priv_key_content,\n domain=None, at_time=None):\n result = {}\n # Read the private key and public certificate\n try:\n priv_key = OpenSSL.crypto.load_privatekey(\n OpenSSL.crypto.FILETYPE_PEM, priv_key_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate_key': {\n 'state': 'invalid', 'detail': str(err)}})\n priv_key = None\n\n try:\n public_cert = OpenSSL.crypto.load_certificate(\n OpenSSL.crypto.FILETYPE_PEM, public_cert_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate': {\n 'state': 'invalid', 'detail': str(err)}})\n public_cert = None\n\n if priv_key and public_cert:\n context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)\n context.use_privatekey(priv_key)\n context.use_certificate(public_cert)\n try:\n context.check_privatekey()\n except OpenSSL.SSL.Error:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate does not match private key.\"}})\n\n if result:\n raise RuntimeError(result)\n\n not_after = public_cert.get_notAfter()\n if not isinstance(not_after, six.string_types):\n not_after = not_after.decode('utf-8')\n not_after = datetime.datetime.strptime(not_after, \"%Y%m%d%H%M%SZ\")\n common_name = public_cert.get_subject().commonName\n alt_names = []\n for ext_idx in range(0, public_cert.get_extension_count()):\n extension = public_cert.get_extension(ext_idx)\n if extension.get_short_name().decode('utf-8') == 'subjectAltName':\n # data of the X509 extension, encoded as ASN.1\n decoded_alt_names, _ = asn1_decoder(\n extension.get_data(), asn1Spec=SubjectAltName())\n for alt in nat_encoder(decoded_alt_names):\n alt_name = alt['dNSName'].decode('utf-8')\n if alt_name != common_name:\n alt_names += [alt_name]\n if domain:\n found = False\n for alt_name in [common_name] + alt_names:\n regex = alt_name.replace('.', r'\\.').replace('*', r'.*') + '$'\n if re.match(regex, domain) or alt_name == domain:\n found = True\n break\n if not found:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"domain name (%s) does not match common or alt names\"\\\n \" present in certificate (%s, %s).\" % (\n domain, common_name, ','.join(alt_names))}})\n if at_time:\n if not_after <= at_time:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate is only valid until %s.\" % not_after}})\n\n if result:\n raise RuntimeError(result)\n\n result.update({'ssl_certificate': {\n 'common_name': common_name,\n 'alt_names': alt_names,\n 'state': result.get('ssl_certificate', {}).get('state', 'valid'),\n 
'issuer': public_cert.get_issuer().organizationName,\n 'ends_at': not_after.isoformat()}})\n return result", "def serialize_certificate(\n certificate: Certificate, encoding: serialization.Encoding\n) -> bytes:\n try:\n cert_bytes = certificate.public_bytes(encoding)\n except Exception as err:\n raise X509CertificateError(\n 'Could not get bytes from object: {}'.format(str(err))\n )\n\n return cert_bytes", "def Certificate(self) -> _n_8_t_0:", "def Certificate(self) -> _n_8_t_0:", "def cert_to_pem(cert):\n return cert.public_bytes(Encoding.PEM)", "def fetch_x509_context(self) -> X509Context:", "def create_cert(self, cert_file, key_file):\n if os.path.isfile(cert_file) and os.path.isfile(key_file):\n return cert_file, key_file\n\n k = crypto.PKey()\n k.generate_key(crypto.TYPE_RSA, 2048)\n cert = crypto.X509()\n cert.get_subject().C = \"US\"\n cert.get_subject().ST = \"CO\"\n cert.get_subject().L = \"Denver\"\n cert.get_subject().CN = gethostname()\n cert.get_subject().O = \"Metropolitan State University of Denver\"\n cert.get_subject().OU = \"Computer Science\"\n cert.set_serial_number(6)\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(365*24*60*60)\n cert.set_issuer(cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(k, 'sha1')\n\n open(join(cert_file), 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n open(join(key_file), \"w\").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))\n return cert_file, key_file", "def test_load_client_ca_unicode(self, context, ca_file):\n pytest.deprecated_call(context.load_client_ca, ca_file.decode(\"ascii\"))", "def deserialize(self, data):\n assert self._cert_store is not None\n try:\n data = self._deserialize(data)\n signature = b64decode(data[\"signature\"])\n signer = data[\"signer\"]\n data = data[\"data\"]\n self._cert_store[signer].verify(data, signature, self._digest)\n return self._deserialize(data)\n except Exception, exc:\n raise SecurityError(\"Unable to deserialize: %r\" % (exc, ))", "def cert(self, value):\n self._cert = value", "def loadFile(filename):\n\t\n\tf = open(filename, 'r')\n\t\n\ttry:\n\t\tcontent = f.read()\n\t\treturn content\n\tfinally:\n\t\tf.close()", "def _sign_cert(self, cert):\n with open(self._get_key_link(self.commonname), 'r') as private_file:\n data = private_file.read()\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM,\n data)\n cert.sign(pkey, 'sha256')", "def parse_pem_certificates(pem_bytes: bytes) -> List[Certificate]:\n\n parsed_certs = pem.parse(pem_bytes)\n if not parsed_certs:\n raise X509CertificateError('Unable to parse PEM X.509 certificate')\n\n result = []\n for cert in parsed_certs:\n try:\n x509_cert = x509.load_pem_x509_certificate(\n cert.as_bytes(), default_backend()\n )\n result.append(x509_cert)\n except Exception:\n raise X509CertificateError('Unable to parse PEM X.509 certificate')\n\n return result", "def loaded_certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LoadedCertificateArgs']]]]:\n return pulumi.get(self, \"loaded_certificates\")", "def pfx2pem(input_file, output_file, passphrase=None):\n pfx = open(input_file, 'rb').read()\n p12 = crypto.load_pkcs12(pfx, passphrase)\n pem = crypto.dump_certificate(crypto.FILETYPE_PEM, p12.get_certificate())\n pem += crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())\n open(output_file, 'wb').write(pem)", "def get_cert_print_bytes(cert):\n #collect PEM bytes\n cert_bytes = cert.decode(\"utf-8\")\n cert_bytes += '\\n'\n\n #collect certificate text\n cert_bytes += 
certs_handler.get_cert_content(cert).decode(\"utf-8\")\n cert_bytes += '\\n'\n\n #contains both PEM and certificate text\n return cert_bytes", "def encode_certificate(self, cert):\n return cert.public_bytes(\n serialization.Encoding.PEM,\n ).decode(encoding='UTF-8')", "def verify_file(data_file, cert_file=None, signature_file=None, trust_dir=None):\n # Sanitize before appending signature extension\n data_file = os.path.realpath(data_file)\n\n if not signature_file:\n signature_file = data_file + '.' + EXT_SIGN\n if not cert_file:\n cert_file = data_file + '.' + EXT_CERT\n if not os.path.exists(signature_file) or not os.path.exists(cert_file):\n return SIGN_NO\n\n # Verify certificate\n cert_validity = verify_certificate(cert_file)\n if cert_validity == CERT_CORRUPTED:\n return SIGN_CORRUPTED\n\n # Check trustworthiness of certificate\n if trust_dir != None and check_trust(cert_file, trust_dir) == CERT_UNTRUSTED:\n return SIGN_UNTRUSTED\n\n # Keep public key in a temporary file\n pub_file = tempfile.NamedTemporaryFile()\n pub_file.write(get_public_key(cert_file))\n pub_file.flush()\n\n # Use OpenSSL to verify signature\n command = '/usr/bin/openssl dgst -sha1 -verify %s -signature %s %s'\n command = command % (pub_file.name, signature_file, data_file)\n command = shlex.split(command)\n\n pipe = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n result = pipe.wait()\n\n # Destroy temporary files\n pub_file.close()\n\n if result == 0:\n if cert_validity == CERT_OK:\n return SIGN_OK\n else:\n return SIGN_SELF\n else:\n return SIGN_CORRUPTED", "def sign_file(filename, key_file, cert_file, password_fd):\n data = file(filename).read()\n signed_binary = sign_data(data, key_file, password_fd)\n cert_data = file(cert_file).read()\n\n # Save certificate\n file('%s.%s' % (filename, EXT_CERT), 'w').write(cert_data)\n\n # Save signed data\n file('%s.%s' % (filename, EXT_SIGN), 'w').write(signed_binary)", "def load(self, filename):\n aead_f = open(filename, \"rb\")\n buf = aead_f.read(1024)\n if buf.startswith(YHSM_AEAD_CRLF_File_Marker):\n buf = YHSM_AEAD_File_Marker + buf[len(YHSM_AEAD_CRLF_File_Marker):]\n if buf.startswith(YHSM_AEAD_File_Marker):\n if buf[len(YHSM_AEAD_File_Marker)] == chr(1):\n # version 1 format\n fmt = \"< I %is\" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)\n self.key_handle, self.nonce = struct.unpack_from(fmt, buf, len(YHSM_AEAD_File_Marker) + 1)\n self.data = buf[len(YHSM_AEAD_File_Marker) + 1 + struct.calcsize(fmt):]\n else:\n raise pyhsm.exception.YHSM_Error('Unknown AEAD file format')\n else:\n # version 0 format, just AEAD data\n self.data = buf[:pyhsm.defines.YSM_MAX_KEY_SIZE + pyhsm.defines.YSM_BLOCK_SIZE]\n aead_f.close()", "def from_file(self, file):\n must_close = False\n if isinstance(file, str):\n try:\n file = open(file, \"rb\")\n except (FileNotFoundError, PermissionError) as e:\n raise GPG.DecryptionException(str(e))\n else:\n must_close = True\n result = subprocess.run(\n [GPG.bin, \"--decrypt\"],\n input=file.read(),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n if must_close:\n file.close()\n if result.returncode == 0:\n data = result.stdout\n return data\n else:\n raise GPG.DecryptionException(result.stderr)", "def _load_binary(file_name):\n try:\n with open(file_name, 'rb') as f:\n return cp.load(f)\n except UnicodeDecodeError: # When loading Python 2 pickle from Python 3\n with open(file_name, 'rb') as f:\n return cp.load(f, encoding=\"latin1\")", "def from_buffer(data, encoding='pem'):\n return 
X509Csr.from_open_file(io.BytesIO(data), encoding)", "def load_from(filename):\n from .io import load\n return load(filename)", "def setCertfile(self, certfile):\r\n if not os.access(certfile, os.R_OK):\r\n raise IOError('No such certfile found: %s' % (certfile))\r\n self.certfile = certfile", "def load_private_key_pem(self, private_key_pem):\n return self.load_private_key(SigningKey.from_pem(private_key_pem))", "def get_domains_from_cert(cert_file):\n proc = subprocess.Popen([\"openssl\", \"x509\", \"-in\", cert_file, \"-noout\", \"-text\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n if proc.returncode != 0:\n raise IOError(\"Error loading {0}: {1}\".format(cert_file, err))\n return ACMEClient._parse_domains_from_openssl_output(out.decode('utf8'))", "def __test_cert_file__(parser, certfile):\n if not os.path.exists(certfile):\n parser.error(\"invalid certificate file {} (it not exists)\".format(certfile))\n return True", "def load_key(fn, psw=None):\n if not fn:\n die(\"Need private key\")\n if psw:\n psw = as_bytes(psw)\n data = load_gpg_file(fn)\n key = load_pem_private_key(data, password=psw, backend=get_backend())\n return key", "def get_der(self):\n return OpenSSL.crypto.dump_certificate(\n OpenSSL.crypto.FILETYPE_ASN1, self._cert)", "def __init__(self, enterprise_cert_file_path):\n self._enterprise_cert_file_path = enterprise_cert_file_path\n self._cert = None\n self._sign_callback = None", "def read_chain_pair(private_key, certificates):\n with open(private_key, 'rb') as f:\n private_key = f.read()\n with open(certificates, 'rb') as f:\n certificates = f.read()\n return (private_key, certificates)", "def from_bytes(cls, bytes):\n construct = _constructs.CertificateRequest.parse(bytes)\n return cls(\n certificate_types=[\n enums.ClientCertificateType(cert_type)\n for cert_type in construct.certificate_types.certificate_types\n ],\n supported_signature_algorithms=[\n SignatureAndHashAlgorithm(\n hash=algorithm.hash,\n signature=algorithm.signature,\n )\n for algorithm in (\n construct.supported_signature_algorithms\n )\n ],\n certificate_authorities=(\n construct.certificate_authorities.certificate_authorities\n )\n )", "def test_load_verify_bytes_cafile(self, tmpfile):\n cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())\n self._load_verify_cafile(cafile)", "def loadKey(self, filename=\"priv.key\"):\n try:\n key_file = open(filename, \"r\")\n data = key_file.read()\n aux = data.split(\";\")\n self.lamb = int(aux[0])\n self.mu = int(aux[1])\n except:\n raise Exception(\"could not load key from file: \" + filename)", "def tls_certificate(ca):\n interface, _host, _port = _get_conn_data(ANY_INTERFACE_IPV4)\n return ca.issue_cert(ntou(interface))", "def _get_bytes_from_pem_file(fpath: str) -> bytes:\n with open(fpath, \"rb\") as f:\n return f.read()" ]
[ "0.80966467", "0.78768235", "0.7402788", "0.7182695", "0.690147", "0.6856863", "0.675995", "0.65883523", "0.65731525", "0.65720236", "0.65545803", "0.6515523", "0.64636666", "0.64080656", "0.6395369", "0.6282713", "0.627722", "0.6143954", "0.6116484", "0.6039491", "0.6023128", "0.59939283", "0.58775353", "0.5860667", "0.56761575", "0.56407636", "0.5639378", "0.5628733", "0.5618123", "0.560552", "0.5592322", "0.558657", "0.55642486", "0.5538747", "0.55029386", "0.5480574", "0.54787225", "0.5478192", "0.54371285", "0.54324925", "0.54272074", "0.5358409", "0.53256845", "0.5324217", "0.53194165", "0.5266136", "0.5261223", "0.52601975", "0.5248693", "0.5242095", "0.52205634", "0.5209908", "0.5206542", "0.52035254", "0.51843584", "0.5183237", "0.51765335", "0.51725954", "0.5168703", "0.51675105", "0.51418996", "0.51180667", "0.50959957", "0.50837", "0.50817007", "0.5080247", "0.5080247", "0.50745046", "0.5059095", "0.5056482", "0.5054462", "0.5046148", "0.5039082", "0.5037691", "0.50361556", "0.50344783", "0.5032205", "0.5021428", "0.50143087", "0.50137174", "0.50055593", "0.500371", "0.5003306", "0.5001493", "0.50013846", "0.4996775", "0.49861768", "0.49779138", "0.49716458", "0.49708417", "0.49708223", "0.49676928", "0.4959873", "0.49368447", "0.49060437", "0.4902561", "0.48946974", "0.48930782", "0.48865622", "0.48827758" ]
0.62535036
17
Loads a private key from a DER or PEM-formatted file. Supports RSA, DSA and EC private keys.
def parse_private(data, password=None):
    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))

    if password is not None:
        if not isinstance(password, byte_cls):
            raise TypeError(pretty_message(
                '''
                password must be a byte string, not %s
                ''',
                type_name(password)
            ))
    else:
        password = b''

    # Appears to be PEM formatted
    if re.match(b'\\s*-----', data) is not None:
        key_type, _, data = _unarmor_pem(data, password)

        if key_type == 'public key':
            raise ValueError(pretty_message(
                '''
                The data specified does not appear to be a private key, but
                rather a public key
                '''
            ))

        if key_type == 'certificate':
            raise ValueError(pretty_message(
                '''
                The data specified does not appear to be a private key, but
                rather a certificate
                '''
            ))

    try:
        pki = PrivateKeyInfo.load(data)
        # Call .native to fully parse since asn1crypto is lazy
        pki.native
        return pki
    except (ValueError):
        pass  # Data was not PrivateKeyInfo

    try:
        parsed_wrapper = EncryptedPrivateKeyInfo.load(data)
        encryption_algorithm_info = parsed_wrapper['encryption_algorithm']
        encrypted_data = parsed_wrapper['encrypted_data'].native
        decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)
        pki = PrivateKeyInfo.load(decrypted_data)
        # Call .native to fully parse since asn1crypto is lazy
        pki.native
        return pki
    except (ValueError):
        pass  # Data was not EncryptedPrivateKeyInfo

    try:
        parsed = RSAPrivateKey.load(data)
        # Call .native to fully parse since asn1crypto is lazy
        parsed.native
        return PrivateKeyInfo.wrap(parsed, 'rsa')
    except (ValueError):
        pass  # Data was not an RSAPrivateKey

    try:
        parsed = DSAPrivateKey.load(data)
        # Call .native to fully parse since asn1crypto is lazy
        parsed.native
        return PrivateKeyInfo.wrap(parsed, 'dsa')
    except (ValueError):
        pass  # Data was not a DSAPrivateKey

    try:
        parsed = ECPrivateKey.load(data)
        # Call .native to fully parse since asn1crypto is lazy
        parsed.native
        return PrivateKeyInfo.wrap(parsed, 'ec')
    except (ValueError):
        pass  # Data was not an ECPrivateKey

    raise ValueError(pretty_message(
        '''
        The data specified does not appear to be a known private key format
        '''
    ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_private_key(file_path: str, password: bytes = None,\n encoding: Encoding = None) -> PrivateKey:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(key_data: bytes) -> PrivateKey:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param key_data: given private keys data\n :return: loaded private key\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())\n\n return generic_load(file_path, solve)", "def load_private_key(filename):\n\twith open(str(filename) + \"_key.pem\", \"rb\") as key_file:\n\t\treturn serialization.load_pem_private_key(\n\t\tkey_file.read(),\n\t\tpassword=None,\n\t\tbackend=default_backend()\n\t)", "def load_private(file):\n with open(file, \"rb\") as pemfile:\n key = jwk.JWK.from_pem(pemfile.read())\n\n logging.info('Loaded private key from {}'.format(file))\n return key", "def load_key(fn, psw=None):\n if not fn:\n die(\"Need private key\")\n if psw:\n psw = as_bytes(psw)\n data = load_gpg_file(fn)\n key = load_pem_private_key(data, password=psw, backend=get_backend())\n return key", "def load_private_key_der(self, private_key_der):\n return self.load_private_key(SigningKey.from_der(private_key_der))", "def import_private_key_from_pem_file(filename, passphrase=None):\n with open(filename, \"rb\") as key_file:\n private_key = serialization.load_pem_private_key(\n key_file.read(), password=passphrase, backend=default_backend()\n )\n return private_key", "def rsa_file_to_privatekey(filename):\r\n fileobject = file(filename,'r')\r\n privatekeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_privatekey(privatekeystring)", "def _load_private_key(self, filename, keytype=None):\n type_map = {\n 'dsa': ssh.DSSKey,\n 'rsa': ssh.RSAKey}\n\n if keytype is None:\n with open(filename, 'rb') as k:\n keydata = k.read()\n \n m = re.search(\"BEGIN (.*?) 
PRIVATE KEY\", keydata)\n if m:\n keytype = m.group(1)\n\n keycls = type_map.get(keytype.lower(), 'dsa')\n\n try:\n key = keycls.from_private_key_file(filename)\n log.debug(\"Loaded key '%s' without password.\", filename)\n except ssh.PasswordRequiredException:\n passphrase = self.config.get('passphrase')\n \n if callable(passphrase):\n passphrase = passphrase(filename,\n self.config.get('remote_host', 'localhost'),\n self.config.get('username', getpass.getuser()))\n if passphrase is None:\n return\n\n if not passphrase:\n passphrase = getpass.getpass(\"Key passphrase: \")\n \n key = keycls.from_private_key_file(filename, passphrase)\n\n return key", "def _try_load_ca_private_key(path):\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, open(path, 'rb').read())\n if pkey.bits() < 2048:\n raise ValueError(\"I'm sorry Dave, I can't let you use a small \"\n \"RSA key.\")\n pkey.check()\n return pkey", "def load_private_key_pem(self, private_key_pem):\n return self.load_private_key(SigningKey.from_pem(private_key_pem))", "def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())", "def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")", "def load_cert(file, format=FORMAT_PEM):\n bio = BIO.openfile(file)\n if format == FORMAT_PEM:\n return load_cert_bio(bio)\n elif format == FORMAT_DER:\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)\n else:\n raise ValueError(\"Unknown format. 
Must be either FORMAT_DER or FORMAT_PEM\")", "def read_private_key_file(pkey_file,\n pkey_password=None,\n key_type=None,\n logger=None):\n ssh_pkey = None\n key_types = (paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey)\n if hasattr(paramiko, 'Ed25519Key'):\n # NOQA: new in paramiko>=2.2: http://docs.paramiko.org/en/stable/api/keys.html#module-paramiko.ed25519key\n key_types += (paramiko.Ed25519Key, )\n for pkey_class in (key_type,) if key_type else key_types:\n try:\n ssh_pkey = pkey_class.from_private_key_file(\n pkey_file,\n password=pkey_password\n )\n if logger:\n logger.debug('Private key file ({0}, {1}) successfully '\n 'loaded'.format(pkey_file, pkey_class))\n break\n except paramiko.PasswordRequiredException:\n if logger:\n logger.error('Password is required for key {0}'\n .format(pkey_file))\n break\n except paramiko.SSHException:\n if logger:\n logger.debug('Private key file ({0}) could not be loaded '\n 'as type {1} or bad password'\n .format(pkey_file, pkey_class))\n return ssh_pkey", "def load_private_key(self, private_key):\n if not self.curve:\n self.curve = private_key.curve\n if self.curve != private_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.private_key = private_key\n return self.private_key.get_verifying_key()", "def loadKey(self, filename=\"priv.key\"):\n try:\n key_file = open(filename, \"r\")\n data = key_file.read()\n aux = data.split(\";\")\n self.lamb = int(aux[0])\n self.mu = int(aux[1])\n except:\n raise Exception(\"could not load key from file: \" + filename)", "def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def _wrap_privatekey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PrivateKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize private key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_private_key(der, password=None,\n backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def load_key(self, pemfile_path_abs: str, set_priv=False) -> None:\n return None", "def load_private_key_bytes(self, private_key):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key load.\")\n return self.load_private_key(\n SigningKey.from_string(private_key, curve=self.curve))", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def Read(key):\n dsa = json.loads(key)\n pub = DsaPublicKey.Read(json.dumps(dsa['publicKey']))\n params = { 'x' : util.Decode(dsa['x']) }\n key = DSA.construct((util.BytesToLong(pub._params['y']),\n util.BytesToLong(pub._params['g']),\n util.BytesToLong(pub._params['p']),\n util.BytesToLong(pub._params['q']),\n util.BytesToLong(params['x'])))\n return DsaPrivateKey(params, pub, key, dsa['size'])", "def test_private_key_pkey(self):\n priv = \"\"\"-----BEGIN PRIVATE 
KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n key = crypto.load_privatekey(PEM, priv)\n self.assertEqual(utils.private_key_type(key), c.KEY_RSA)", "def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def load_key_and_cert(key_file, cert_file):\n with open(cert_file, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n with open(key_file, 'rb') as f:\n key = serialization.load_pem_private_key(f.read(), None, backend=default_backend())\n\n return key, cert", "def mk_keyobj_from_private_key_der(self, derdat):\n self.private_key_obj = serialization.load_der_private_key(derdat, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def Read(key):\n dsa = json.loads(key)\n pub = DsaPublicKey.Read(json.dumps(dsa['publicKey']))\n params = {'x': util.Base64WSDecode(dsa['x'])}\n key = DSA.construct((util.BytesToLong(pub._params['y']),\n util.BytesToLong(pub._params['g']),\n util.BytesToLong(pub._params['p']),\n util.BytesToLong(pub._params['q']),\n util.BytesToLong(params['x'])))\n return DsaPrivateKey(params, pub, key, dsa['size'])", "def _generate_ca_private_key(path):\n DEFAULT_KEY_ALG = crypto.TYPE_RSA\n DEFAULT_KEY_BITS = 2048\n\n pkey = crypto.PKey()\n pkey.generate_key(DEFAULT_KEY_ALG, DEFAULT_KEY_BITS)\n data = crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)\n open(path, 'wb').write(data)\n\n return pkey", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def load_pem_x509_certificate(data):\n return _x509.load_pem_x509_certificate(data, _backends.default_backend())", "def save_rsa_private_key(private_key: RSAPrivateKeyWithSerialization, file_path: str, password: str = None,\n encoding: Encoding = Encoding.PEM) -> None:\n if password:\n if isinstance(password, str):\n password_bytes = password.encode('utf-8')\n else:\n password_bytes = password\n enc = serialization.BestAvailableEncryption(password=password_bytes) if password else serialization.NoEncryption()\n pem_data = private_key.private_bytes(encoding, serialization.PrivateFormat.PKCS8, enc)\n with open(file_path, 'wb') as f:\n f.write(pem_data)", "def validate_privatekey_pem(key_pem):\n assert isinstance(key_pem, str)\n\n private_key_cryptography = serialization.load_pem_private_key(\n data=key_pem.encode('ascii'),\n password=None,\n backend=cryptography_default_backend\n )\n\n if not isinstance(private_key_cryptography, rsa.RSAPrivateKey):\n sys.exit('Unexpected private key type')\n\n return private_key_cryptography", "def deserializePrivateKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_private_key(string, password = None , backend = bc)", "def _serialize_private_key(private_key, password=None):\n error = 
None\n pvt_key_loaders = [\n load_pem_private_key, load_der_private_key\n ]\n pvt_key = None\n for loader in pvt_key_loaders:\n if not pvt_key:\n try:\n pvt_key = loader(\n private_key.encode('utf-8'),\n password=password,\n backend=default_backend()\n )\n error = False\n break\n except (ValueError, UnsupportedAlgorithm) as err:\n error = err\n if error:\n raise errors.InvalidPrivateKeyError(error)\n else:\n return pvt_key", "def get_private_key():\n if not os.path.exists(_private_key_path):\n return None\n\n try:\n with open(_private_key_path) as secret_file:\n return secret_file.read()\n\n except Exception as exc:\n log.error(f'Could not read private key.\\n{exc}')\n traceback.print_exc(file=sys.stderr)", "def mk_keyobj_from_private_key_pem(self, pemdat_string):\n if isinstance(pemdat_string, str):\n pemdat_string = pemdat_string.encode()\n self.private_key_obj = serialization.load_pem_private_key(pemdat_string, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def _load_key(self, path):\n with open(path, 'r') as f:\n self._key = f.readline().strip()\n self._secret = f.readline().strip()", "def load_key():\n return open(\"pass.key\", \"rb\").read()", "def loadKey (self, filename=\"pub.key\"):\n try:\n key_file = open(filename, \"r\")\n data = key_file.read()\n aux = data.split(\";\")\n self.n = int(aux[0])\n self.n_sq = int(aux[1])\n self.g = int(aux[2])\n except:\n raise Exception(\"could not load key from file: \" + filename)", "def load_private_data():\n with open(PRIVATE_DATA_FILE) as priv_file:\n return json.load(priv_file)", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {'privateExponent': util.Decode(rsa['privateExponent']),\n 'primeP': util.Decode(rsa['primeP']),\n 'primeQ': util.Decode(rsa['primeQ']),\n 'primeExponentP': util.Decode(rsa['primeExponentP']),\n 'primeExponentQ': util.Decode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Decode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "def load_public_key(filename):\n\twith open(str(filename) + \"_pub_key.pem\", \"rb\") as key_file:\n\t\treturn serialization.load_pem_public_key(\n\t\tkey_file.read(),\n\t\tbackend=default_backend()\n\t)", "def test_set_private_key(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption(private_key=self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def private_key(self):\n if self._private_key is not None:\n return self._private_key[0]\n\n spk = self.serialized_private_key\n passphrase = self.passphrase\n\n try:\n self._private_key = [\n serialization.load_pem_private_key(\n self.serialized_private_key,\n backend=default_backend(),\n password=self.passphrase)]\n\n return self._private_key[0]\n\n except:\n raise\n self._private_key = [None]\n return self._private_key[0]", "def rsa_private_key(ctx, key_size=\"4096\"):\n rsa_key_size = 
int(key_size)\n\n key = rsa.generate_private_key(public_exponent=65537, key_size=rsa_key_size, backend=default_backend())\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )", "def load_Fernet_key(filename):\n\tfich = open(str(filename) +'.key', 'rb')\n\tkey = fich.read() # The key will be type bytes\n\tfich.close()\n\treturn key", "def load_received_public_key_der(self, public_key_der):\n return self.load_received_public_key(VerifyingKey.from_der(public_key_der))", "def import_public_key_from_pem_file(filename):\n with open(filename, \"rb\") as key_file:\n public_key = serialization.load_pem_public_key(key_file.read(), backend=default_backend())\n return public_key", "def test_private_key_rsa(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_RSA)", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {\n 'privateExponent': util.Base64WSDecode(rsa['privateExponent']),\n 'primeP': util.Base64WSDecode(rsa['primeP']),\n 'primeQ': util.Base64WSDecode(rsa['primeQ']),\n 'primeExponentP': util.Base64WSDecode(rsa['primeExponentP']),\n 'primeExponentQ': util.Base64WSDecode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Base64WSDecode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "def PRIVATE_RSA_KEYFILE_PATH() :\n return os.path.join( config.CONFIG_PATH(), \"%s-private.pem\" % RSA_KEYPAIR_PREFIX() )", "def create( cls, user_id = None, private_keyfile_path = None ) :\n user_id = user_id or config.USER_ID()\n private_keyfile_path = private_keyfile_path or PRIVATE_RSA_KEYFILE_PATH()\n with open( private_keyfile_path, \"r\" ) as stream :\n private_key = rsa.PrivateKey.load_pkcs1( stream.read() )\n return cls( user_id, private_key )", "def load_keys(self, load_path=DEFAULT_KEY_PATH):\n try:\n with open(f'{load_path}/id_elgamal', 'r') as f:\n f.read(self.keys['private'])\n with 
open(f'{load_path}/id_elgamal.pub', 'r') as f:\n self.keys['public']['p'] = f.readline()\n self.keys['public']['g'] = f.readline()\n self.keys['public']['y'] = f.readline()\n debug_message('Loading successful!')\n return self.keys\n except FileNotFoundError:\n debug_message(f'Loading error! ({FileNotFoundError})')\n return 0", "def _get_private_key(self, privkey=None):\n\n # read private keys from keyring\n privkeys = self.gpg.list_keys(True) # True => private keys\n if len(privkeys) > 0 and privkeys[-1].has_key('fingerprint'):\n fingerprints = []\n for k in privkeys:\n fingerprints.append(k['fingerprint'])\n else:\n # no private key in keyring\n return None\n\n if privkey:\n # check for existence of private key received as argument\n # DEVEL: check for expiration as well\n if len(privkey) > 7 and len(privkey) <= 40:\n for fp in fingerprints:\n if fp.endswith(privkey):\n # work with last 16 significant chars internally,\n # even if only 8 are required in trac.ini\n privkey = fp[-16:]\n break\n # no fingerprint matching key ID\n else:\n privkey = None\n else:\n # reset invalid key ID\n privkey = None\n else:\n # select (last) private key from keyring\n privkey = fingerprints[-1][-16:]\n\n return privkey", "def _get_pubkey_from_pem_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_pem_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def load_key():\n return open(\"Secret.key\",\"rb\").read()", "def load_rsa_key(key, key_type, key_encoding):\n # (bytes, EncryptionKeyType, KeyEncodingType) -> Any\n # narrow down the output type\n # https://github.com/aws/aws-dynamodb-encryption-python/issues/66\n try:\n loader = _RSA_KEY_LOADING[key_type][key_encoding]\n except KeyError:\n raise ValueError(\"Invalid key type and encoding: {} and {}\".format(key_type, key_encoding))\n\n kwargs = dict(data=key, backend=default_backend())\n if key_type is EncryptionKeyType.PRIVATE:\n kwargs[\"password\"] = None\n\n loaded_key = loader(**kwargs)\n\n if loaded_key.key_size < MinimumKeySizes.RSA.value:\n _LOGGER.warning(\"RSA keys smaller than %d bits are unsafe\", MinimumKeySizes.RSA.value)\n\n return loaded_key", "def _unarmor_pem(data, password=None):\n\n object_type, headers, der_bytes = unarmor(data)\n\n type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n armor_type = re.match(type_regex, object_type)\n if not armor_type:\n raise ValueError(pretty_message(\n '''\n data does not seem to contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n pem_header = armor_type.group(1)\n\n data = data.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n algo = armor_type.group(2).lower()\n return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))\n\n key_type = pem_header.lower()\n algo = None\n if key_type == 'encrypted private key':\n key_type = 'private key'\n elif key_type == 'rsa public key':\n key_type = 'public key'\n algo = 'rsa'\n\n return (key_type, algo, der_bytes)", "def parsePEMKey(s, private=False, public=False, passwordCallback=None,\r\n implementations=[\"openssl\", \"python\"]):\r\n for implementation in implementations:\r\n if implementation == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n key = 
OpenSSL_RSAKey.parse(s, passwordCallback)\r\n break\r\n elif implementation == \"python\":\r\n key = Python_RSAKey.parsePEM(s)\r\n break\r\n else:\r\n raise ValueError(\"No acceptable implementations\")\r\n\r\n return _parseKeyHelper(key, private, public)", "def load_key():\n return open(\"secret.key\", \"rb\").read()", "def load_device_key(self, filename):\n pass", "def test_private_key_dsa(self):\n priv = \"\"\"-----BEGIN DSA PRIVATE KEY-----\nMIH4AgEAAkEAnogScrza9M5nFogjwu7MUSgOeWRfHSFWKLiFxfkNOAb1Z5oXTUKR\ncKdSxfI1zu47rvyqV6+4SSkQEsVJ2/7DQQIVANuQv4L3sp8AiUn+mwCyXhedQl2Z\nAkBfCDLU4nx7OeMx+vD9MN7FW57pHm/43B1Tu/cUOWcp5VHPJRuVWJqINIteY/0i\nlFEUCMibgol8Upj6CGnuDpvTAkAbnRx76A8r+o/3I5hlrlAmCi68uiiqW6W2R40U\n2g/KlIiafMEQ3+OrMwwkPX0aaJwa8m7lsUlmhhYOXu5p4fL1AhUAuxjeo0++fjI+\nnEIPmnCNPGjuBY8=\n-----END DSA PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIICDjCCAcqgAwIBAgIJAMcdoiKyV98cMAsGCWCGSAFlAwQDAjAiMRAwDgYDVQQD\nDAdEU0EgNTEyMQ4wDAYDVQQKDAVXZWJDQTAeFw0xODA1MjcxMDI1MjBaFw0xODA2\nMjYxMDI1MjBaMCIxEDAOBgNVBAMMB0RTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMIHw\nMIGoBgcqhkjOOAQBMIGcAkEAnogScrza9M5nFogjwu7MUSgOeWRfHSFWKLiFxfkN\nOAb1Z5oXTUKRcKdSxfI1zu47rvyqV6+4SSkQEsVJ2/7DQQIVANuQv4L3sp8AiUn+\nmwCyXhedQl2ZAkBfCDLU4nx7OeMx+vD9MN7FW57pHm/43B1Tu/cUOWcp5VHPJRuV\nWJqINIteY/0ilFEUCMibgol8Upj6CGnuDpvTA0MAAkAbnRx76A8r+o/3I5hlrlAm\nCi68uiiqW6W2R40U2g/KlIiafMEQ3+OrMwwkPX0aaJwa8m7lsUlmhhYOXu5p4fL1\no1AwTjAdBgNVHQ4EFgQUHub1qPkaKCtkQbmu3RnLaa8QAP4wHwYDVR0jBBgwFoAU\nHub1qPkaKCtkQbmu3RnLaa8QAP4wDAYDVR0TBAUwAwEB/zALBglghkgBZQMEAwID\nMQAwLgIVAMOEZCvJoNjIMzbH0yWrEUS6IxywAhUAzDhkGKvAH1V3o2ZsJsIotFUk\nIiQ=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_DSA)", "def rsa_string_to_privatekey(mystr):\r\n if len(mystr.split()) != 3:\r\n raise ValueError, \"Invalid private key string\"\r\n \r\n return {'d':long(mystr.split()[0]), 'p':long(mystr.split()[1]), 'q':long(mystr.split()[2])}", "def load_certificate(file_path: str, encoding: Encoding = None) -> Certificate:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(certificate_data: bytes) -> Certificate:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param certificate_data: given certificate data\n :return: loaded certificate\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_x509_certificate,\n Encoding.DER: load_der_x509_certificate\n }[real_encoding](certificate_data, default_backend())\n\n return generic_load(file_path, solve)", "def load(cls, cert_path: Union[Path, str], key_path: Union[Path, str]) -> \"CertificateAuthority\":\n cert_path, key_path = Path(cert_path), Path(key_path)\n\n with cert_path.open(\"rb\") as file:\n cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, file.read())\n\n with key_path.open(\"rb\") as file:\n key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, file.read())\n\n return cls(key, cert)", "def private_key(\n self,\n key: str,\n default: Any = undefined,\n description: str = None,\n key_format: Optional[EncryptionKeyFormat] = None,\n passphrase: Optional[str] = None,\n **kwargs\n ) -> Optional[PrivateKey]:\n cast_key = partial(cast_private_key, key_format=key_format, passphrase=passphrase)\n return self._process(key, description=description, default=default, cast=cast_key,type=PrivateKey, **kwargs)", "async def retrieve_private_key(self) -> Tuple[str, str]:\n\n filename, file_path = random.choice(self._private_keys)\n async with aiofiles.open(file_path, 
mode='r') as file:\n private_key = await file.read()\n return private_key, self._create_public_key_identifier(filename)", "def ed25519_private_key(ctx):\n\n key = ed25519.Ed25519PrivateKey.generate()\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )", "def private_key(self):\n return PrivateKey(self._sk.private_bytes(\n encoding=serialization.Encoding.Raw,\n format=serialization.PrivateFormat.Raw,\n encryption_algorithm=serialization.NoEncryption()))", "def test_get_private_key(self):\n\n expected = self.pem_private_key\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n actual = encryptor.get_private_key()\n\n self.assertEqual(expected, actual)", "def load_key(self):\n\t return open(\"key.key\", \"rb\").read()", "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def Read(key):\n\n dsa = json.loads(key)\n params = {'y' : util.Decode(dsa['y']),\n 'p' : util.Decode(dsa['p']),\n 'g' : util.Decode(dsa['g']),\n 'q' : util.Decode(dsa['q'])}\n pubkey = DSA.construct((util.BytesToLong(params['y']),\n util.BytesToLong(params['g']),\n util.BytesToLong(params['p']),\n util.BytesToLong(params['q'])))\n return DsaPublicKey(params, pubkey, dsa['size'])", "def load_cert_der_string(string):\n bio = BIO.MemoryBuffer(string)\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)", "def load_key():\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n try:\r\n return open(key_dir, \"rb\").read()\r\n except:\r\n return None", "def Read(key):\n\n dsa = json.loads(key)\n params = {\n 'y': util.Base64WSDecode(dsa['y']),\n 'p': util.Base64WSDecode(dsa['p']),\n 'g': util.Base64WSDecode(dsa['g']),\n 'q': util.Base64WSDecode(dsa['q'])\n }\n pubkey = DSA.construct(\n (util.BytesToLong(params['y']), util.BytesToLong(params['g']),\n util.BytesToLong(params['p']), util.BytesToLong(params['q'])))\n return DsaPublicKey(params, pubkey, dsa['size'])", "def load_x509_cert(url, httpc, spec2key, **get_args):\n try:\n r = httpc(\"GET\", url, allow_redirects=True, **get_args)\n if r.status_code == 200:\n cert = str(r.text)\n try:\n public_key = spec2key[cert] # If I've already seen it\n except KeyError:\n public_key = import_public_key_from_pem_data(cert)\n spec2key[cert] = public_key\n\n if isinstance(public_key, rsa.RSAPublicKey):\n return {\"rsa\": public_key}\n elif isinstance(public_key, ec.EllipticCurvePublicKey):\n return {\"ec\": public_key}\n else:\n raise Exception(\"HTTP Get error: %s\" % r.status_code)\n except Exception as err: # not a RSA key\n logger.warning(\"Can't load key: %s\" % err)\n return []", "def read_chain_pair(private_key, certificates):\n with open(private_key, 'rb') as f:\n private_key = f.read()\n with open(certificates, 'rb') as f:\n certificates = f.read()\n return (private_key, 
certificates)", "def store_private_key(private_key,filename):\n\twith open(str(filename) + \"_key.pem\", \"wb\") as key_file:\n\t\tpem = private_key.private_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PrivateFormat.TraditionalOpenSSL,\n\t\tencryption_algorithm=serialization.NoEncryption()\n\t\t)\n\t\tkey_file.write(pem)\n\tkey_file.close()", "def test_use_privatekey_file_bytes(self, tmpfile):\n self._use_privatekey_file_test(\n tmpfile + NON_ASCII.encode(getfilesystemencoding()),\n FILETYPE_PEM,\n )", "def load_password(fn):\n if not fn:\n return None\n data = load_gpg_file(fn)\n data = data.strip(b'\\n')\n return data", "def load_cert_bio(bio, format=FORMAT_PEM):\n if format == FORMAT_PEM:\n cptr = m2.x509_read_pem(bio._ptr())\n elif format == FORMAT_DER:\n cptr = m2.d2i_x509(bio._ptr())\n else:\n raise ValueError(\"Unknown format. Must be either FORMAT_DER or FORMAT_PEM\")\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)", "def read_keypair(priv_key_file, public_key_file):\n key_pair = {}\n with open(priv_key_file) as f:\n key_data = f.read()\n f.close()\n key_pair[\"key\"] = key_data\n with open(public_key_file) as f:\n pub_data = f.read()\n f.close()\n key_pair[\"pub\"] = pub_data\n for i in [priv_key_file, public_key_file]:\n os.remove(i)\n return key_pair", "def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def __decryptRSA(msg, user):\n # Load user's private key\n try:\n with open(\"%s/%s/keys/privateKey.pem\" % (USERS, user), \"rb\") as f:\n privateKey = serialization.load_pem_private_key(\n f.read(),\n password=None,\n backend=default_backend()\n )\n f.close()\n except:\n print(\"Error opening user's private key\")\n print(sys.exc_info())\n return None\n \n # Decrypt message\n return privateKey.decrypt(\n msg, \n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def get_private_key_pem( pkey_path ):\n \n # get the OpenCloud private key \n observer_pkey = syndicate_storage.read_private_key( pkey_path )\n if observer_pkey is None:\n logger.error(\"Failed to load Observer private key\")\n return None\n \n observer_pkey_pem = observer_pkey.exportKey()\n \n return observer_pkey_pem", "def from_file(self, file):\n must_close = False\n if isinstance(file, str):\n try:\n file = open(file, \"rb\")\n except (FileNotFoundError, PermissionError) as e:\n raise GPG.DecryptionException(str(e))\n else:\n must_close = True\n result = subprocess.run(\n [GPG.bin, \"--decrypt\"],\n input=file.read(),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n if must_close:\n file.close()\n if result.returncode == 0:\n data = result.stdout\n return data\n else:\n raise GPG.DecryptionException(result.stderr)", "def _use_privatekey_file_test(self, pemfile, filetype):\n key = PKey()\n key.generate_key(TYPE_RSA, 1024)\n\n with open(pemfile, \"wt\") as pem:\n pem.write(dump_privatekey(FILETYPE_PEM, key).decode(\"ascii\"))\n\n ctx = Context(SSLv23_METHOD)\n ctx.use_privatekey_file(pemfile, filetype)", "def decrypt_using_private_key(message):\n public_key_path = os.path.join('keys', 'private.key')\n with open(public_key_path, 'rb') as file:\n private_key = RSA.importKey(file.read())\n\n cipher = PKCS1_OAEP.new(private_key)\n encrypted = cipher.decrypt(message)\n return encrypted.hex()", "def test_set_private_key_setter(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n 
encryptor.set_private_key(self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def from_bytes(b):\n if len(b) < 78:\n raise ValueError(\"b must be at least 78 bytes long.\")\n\n version = int.from_bytes(b[:4], 'big')\n depth = b[4]\n parent_fingerprint = b[5:9]\n index = int.from_bytes(b[9:13], 'big')\n chain_code = b[13:45]\n key_bytes = b[45:78]\n\n rv = None\n if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION:\n if key_bytes[0] != 0:\n raise ValueError(\"First byte of private key must be 0x00!\")\n\n private_key = int.from_bytes(key_bytes[1:], 'big')\n rv = HDPrivateKey(key=private_key,\n chain_code=chain_code,\n index=index,\n depth=depth,\n parent_fingerprint=parent_fingerprint)\n elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION:\n if key_bytes[0] != 0x02 and key_bytes[0] != 0x03:\n raise ValueError(\"First byte of public key must be 0x02 or 0x03!\")\n\n public_key = PublicKey.from_bytes(key_bytes)\n rv = HDPublicKey(x=public_key.point.x,\n y=public_key.point.y,\n chain_code=chain_code,\n index=index,\n depth=depth,\n parent_fingerprint=parent_fingerprint)\n else:\n raise ValueError(\"incorrect encoding.\")\n\n return (rv, b[78:])", "def load_received_public_key_pem(self, public_key_pem):\n return self.load_received_public_key(VerifyingKey.from_pem(public_key_pem))", "def test_set_private_key_setter_pem_str(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def generate_private_key(self):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key generation.\")\n return self.load_private_key(SigningKey.generate(curve=self.curve))", "def rsa_decrypt(data, rsa_priv_key_str):\r\n key = RSA.importKey(rsa_priv_key_str)\r\n cipher = PKCS1_OAEP.new(key)\r\n return cipher.decrypt(data)", "def get_private_key(self):\n# _log.debug(\"get_private_key: node_name={}\".format(self.node_name))\n with open(os.path.join(self.runtime_dir, \"private\", \"private.key\"), 'rb') as f:\n return f.read()", "def __init__(self, gen_priv_key: bool = False, priv_key_path: str = None):\n self.priv_key = None\n self.pub_key = None\n\n # max size = (bytes(rsa) - 2 * bytes(hash) - 2),\n # currently hard-coded to 190 = 256 - 2 * 32 - 2\n self.max_encrypt_size = 190\n\n if gen_priv_key:\n self.priv_key = RSA.generate(2048)\n if priv_key_path is not None:\n path = pathlib.Path(priv_key_path)\n with open(path.as_posix(), 'w') as f:\n f.write(self.priv_key.export_key().decode('utf-8'))\n elif priv_key_path is not None:\n path = pathlib.Path(priv_key_path)\n if path.is_file():\n self.priv_key = RSA.importKey(open(path.as_posix()).read())\n else:\n raise Exception(\"Failed to open file {}\".format(path.as_posix))\n\n if self.priv_key is not None:\n self.pub_key = self.priv_key.publickey()\n\n # delegate encrypt/decrypt function\n self.cipher = PKCS1_OAEP.new(self.priv_key, hashAlgo=SHA256)\n self.decrypt = self.cipher.decrypt", "def get_key_pair_from_pvk_pem_file(fpath: str) -> 
typing.Tuple[bytes, bytes]:\n pvk = _get_bytes_from_pem_file(fpath).decode(\"UTF-8\")\n sk = ecdsa.SigningKey.from_pem(pvk)\n\n return _get_key_pair_from_sk(sk)", "def load_cert_chain(self, certfile, keyfile: Optional[Any] = ...):\n ...", "def test_use_privatekey_file_unicode(self, tmpfile):\n self._use_privatekey_file_test(\n tmpfile.decode(getfilesystemencoding()) + NON_ASCII,\n FILETYPE_PEM,\n )" ]
[ "0.7638723", "0.760737", "0.75808924", "0.7359124", "0.7319375", "0.73121125", "0.72465986", "0.72324586", "0.711873", "0.6725466", "0.6643382", "0.65556437", "0.65554005", "0.65311825", "0.6508552", "0.6455299", "0.6412754", "0.639861", "0.63618493", "0.63402176", "0.6329978", "0.6312556", "0.6300331", "0.6249528", "0.62481207", "0.6214568", "0.6213353", "0.6212972", "0.61902976", "0.61882997", "0.61863965", "0.6148166", "0.6138975", "0.6119685", "0.611437", "0.61141926", "0.609639", "0.6072047", "0.60644716", "0.6033684", "0.6031687", "0.60049653", "0.59951884", "0.5984028", "0.59675914", "0.5954115", "0.5929715", "0.59120995", "0.59023166", "0.5886233", "0.58615774", "0.58557844", "0.5852727", "0.58488363", "0.5844275", "0.5834842", "0.58155143", "0.5815161", "0.5802392", "0.5796024", "0.5786063", "0.57733107", "0.5765683", "0.57638353", "0.5755679", "0.5727311", "0.57167405", "0.57001245", "0.56853956", "0.5684788", "0.5667348", "0.56497556", "0.56340647", "0.5633335", "0.56149095", "0.5606865", "0.55862194", "0.55810285", "0.55725455", "0.5566625", "0.55511105", "0.5545774", "0.5542748", "0.5517283", "0.551661", "0.55140865", "0.5505773", "0.55057317", "0.5489075", "0.54818994", "0.5463672", "0.5462954", "0.54610556", "0.5452409", "0.54434", "0.5442388", "0.54287076", "0.54282457", "0.5420962", "0.5414377" ]
0.6443896
16
Removes PEM-encoding from a public key, private key or certificate. If the private key is encrypted, the password will be used to decrypt it.
def _unarmor_pem(data, password=None):
    object_type, headers, der_bytes = unarmor(data)

    type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'
    armor_type = re.match(type_regex, object_type)
    if not armor_type:
        raise ValueError(pretty_message(
            '''
            data does not seem to contain a PEM-encoded certificate, private
            key or public key
            '''
        ))

    pem_header = armor_type.group(1)

    data = data.strip()

    # RSA private keys are encrypted after being DER-encoded, but before base64
    # encoding, so they need to be handled specially
    if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):
        algo = armor_type.group(2).lower()
        return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))

    key_type = pem_header.lower()
    algo = None
    if key_type == 'encrypted private key':
        key_type = 'private key'
    elif key_type == 'rsa public key':
        key_type = 'public key'
        algo = 'rsa'

    return (key_type, algo, der_bytes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key_to_pem(key, password=None):\n if password:\n enc = BestAvailableEncryption(as_bytes(password))\n else:\n enc = NoEncryption()\n return key.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, enc)", "def strip_begin_end_public_key(key):\n return key.replace(\"\\n\", \"\")\\\n .replace(\"-----BEGIN PUBLIC KEY-----\", \"\").replace(\n \"-----END PUBLIC KEY-----\", \"\")", "def _unarmor_pem_openssl_private(headers, data, password):\n\n enc_algo = None\n enc_iv_hex = None\n enc_iv = None\n\n if 'DEK-Info' in headers:\n params = headers['DEK-Info']\n if params.find(',') != -1:\n enc_algo, enc_iv_hex = params.strip().split(',')\n else:\n enc_algo = 'RC4'\n\n if not enc_algo:\n return data\n\n if enc_iv_hex:\n enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii'))\n enc_algo = enc_algo.lower()\n\n enc_key_length = {\n 'aes-128-cbc': 16,\n 'aes-128': 16,\n 'aes-192-cbc': 24,\n 'aes-192': 24,\n 'aes-256-cbc': 32,\n 'aes-256': 32,\n 'rc4': 16,\n 'rc4-64': 8,\n 'rc4-40': 5,\n 'rc2-64-cbc': 8,\n 'rc2-40-cbc': 5,\n 'rc2-cbc': 16,\n 'rc2': 16,\n 'des-ede3-cbc': 24,\n 'des-ede3': 24,\n 'des3': 24,\n 'des-ede-cbc': 16,\n 'des-cbc': 8,\n 'des': 8,\n }[enc_algo]\n\n enc_key = hashlib.md5(password + enc_iv[0:8]).digest()\n while enc_key_length > len(enc_key):\n enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest()\n enc_key = enc_key[0:enc_key_length]\n\n enc_algo_name = {\n 'aes-128-cbc': 'aes',\n 'aes-128': 'aes',\n 'aes-192-cbc': 'aes',\n 'aes-192': 'aes',\n 'aes-256-cbc': 'aes',\n 'aes-256': 'aes',\n 'rc4': 'rc4',\n 'rc4-64': 'rc4',\n 'rc4-40': 'rc4',\n 'rc2-64-cbc': 'rc2',\n 'rc2-40-cbc': 'rc2',\n 'rc2-cbc': 'rc2',\n 'rc2': 'rc2',\n 'des-ede3-cbc': 'tripledes',\n 'des-ede3': 'tripledes',\n 'des3': 'tripledes',\n 'des-ede-cbc': 'tripledes',\n 'des-cbc': 'des',\n 'des': 'des',\n }[enc_algo]\n decrypt_func = crypto_funcs[enc_algo_name]\n\n if enc_algo_name == 'rc4':\n return decrypt_func(enc_key, data)\n\n return decrypt_func(enc_key, data, enc_iv)", "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def validate_privatekey_pem(key_pem):\n assert isinstance(key_pem, str)\n\n private_key_cryptography = serialization.load_pem_private_key(\n data=key_pem.encode('ascii'),\n password=None,\n backend=cryptography_default_backend\n )\n\n if not isinstance(private_key_cryptography, rsa.RSAPrivateKey):\n sys.exit('Unexpected private key type')\n\n return private_key_cryptography", "def test_set_private_key_setter_encrypted_pem_str_password(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(\n self.encrypted_pem_private_key, password=self.private_key_password.decode()\n )\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def test_set_private_key_setter_encrypted_pem(self) -> None:\n\n expected = 
self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(\n self.encrypted_pem_private_key, password=self.private_key_password\n )\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def dePem(s, name):\r\n prefix = \"-----BEGIN %s-----\" % name\r\n postfix = \"-----END %s-----\" % name \r\n start = s.find(prefix)\r\n if start == -1:\r\n raise SyntaxError(\"Missing PEM prefix\")\r\n end = s.find(postfix, start+len(prefix))\r\n if end == -1:\r\n raise SyntaxError(\"Missing PEM postfix\")\r\n s = s[start+len(\"-----BEGIN %s-----\" % name) : end]\r\n retBytes = a2b_base64(s) # May raise SyntaxError\r\n return retBytes", "def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def test_set_private_key_setter_pem_str(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def convert_key_to_pem ( key_filename, output_filename ) :\n cmd = 'openssl rsa -in ' + key_filename + ' -outform PEM -out ' + output_filename\n return subprocess.call( cmd, shell = True )", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def passwd_decryption(self):\n with open(self.key_path, 'rb') as input_key:\n for line in input_key:\n key = line\n with open(self.pass_path, 'rb') as input_password:\n for line in input_password:\n password = line\n cipher_suit = Fernet(key)\n plain_password = cipher_suit.decrypt(password)\n plain_password = bytes(plain_password).decode('utf-8')\n \n return plain_password", "def pfx2pem(input_file, output_file, passphrase=None):\n pfx = open(input_file, 'rb').read()\n p12 = crypto.load_pkcs12(pfx, passphrase)\n pem = crypto.dump_certificate(crypto.FILETYPE_PEM, p12.get_certificate())\n pem += crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())\n open(output_file, 'wb').write(pem)", "def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].parsed\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': Integer(pow(\n params['g'].native,\n parsed.native,\n params['p'].native\n )),\n 'private_key': parsed,\n })\n\n if key_alg == 'ec':\n parsed = key_info['private_key'].parsed\n parsed['parameters'] = key_info['private_key_algorithm']['parameters']\n return parsed\n\n raise ValueError('Unsupported key_info.algorithm \"%s\"' % key_info.algorithm)", "def ec_private_pem_to_private_bin(pem):\n return \"\".join(pem.split(\"\\n\")[1:-2]).decode(\"BASE64\")", "def pem_armor_certificate(certificate):\n\n return asymmetric.dump_certificate(certificate)", "def 
verify_and_unseal_blob( public_key_pem, secret, blob_data ):\n\n # verify it \n rc, sealed_data = syndicate_crypto.verify_and_parse_json( public_key_pem, blob_data )\n if rc != 0:\n logger.error(\"Failed to verify and parse blob, rc = %s\" % rc)\n return None\n\n logger.info(\"Unsealing credential data\")\n\n rc, data = c_syndicate.password_unseal( sealed_data, secret )\n if rc != 0:\n logger.error(\"Failed to unseal blob, rc = %s\" % rc )\n return None\n\n return data", "def decrypt(s):\n if s is None:\n return None\n else:\n # try:\n enc_value = ast.literal_eval(s)\n private_key = serialization.load_pem_private_key(\n pkey.encode('utf-8'),\n password=None,\n backend=default_backend()\n )\n\n dec = private_key.decrypt(\n enc_value,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return dec.decode()", "def load_pem_x509_certificate(data):\n return _x509.load_pem_x509_certificate(data, _backends.default_backend())", "def extract_ca_private_key_bytes_from_pem(pem_content):\n found_marker = False\n for begin_marker in [constants.BEGIN_PRIVATE_KEY_MARKER,\n constants.BEGIN_RSA_PRIVATE_KEY_MARKER]:\n begin_search = pem_content.find(begin_marker)\n if begin_search >= 0:\n found_marker = True\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n found_marker = False\n for end_marker in [constants.END_PRIVATE_KEY_MARKER,\n constants.END_RSA_PRIVATE_KEY_MARKER]:\n end_search = pem_content.find(end_marker)\n if end_search >= 0:\n found_marker = True\n end_search += len(end_marker)\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n base64_key = base64.encode_as_text(pem_content[begin_search:end_search])\n return base64_key", "def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data, password)\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a public key\n '''\n ))\n\n if key_type == 'certificate':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a certificate\n '''\n ))\n\n try:\n pki = PrivateKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PrivateKeyInfo\n\n try:\n parsed_wrapper = EncryptedPrivateKeyInfo.load(data)\n encryption_algorithm_info = parsed_wrapper['encryption_algorithm']\n encrypted_data = parsed_wrapper['encrypted_data'].native\n decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)\n pki = PrivateKeyInfo.load(decrypted_data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not EncryptedPrivateKeyInfo\n\n try:\n parsed = RSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPrivateKey\n\n try:\n parsed = 
DSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'dsa')\n except (ValueError):\n pass # Data was not a DSAPrivateKey\n\n try:\n parsed = ECPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'ec')\n except (ValueError):\n pass # Data was not an ECPrivateKey\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known private key format\n '''\n ))", "def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())", "def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")", "def test_set_private_key(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption(private_key=self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def base64_to_pem(crypto_type, b64_text, width=76):\n lines = ''\n for pos in xrange(0, len(b64_text), width):\n lines += b64_text[pos:pos + width] + '\\n'\n\n return '-----BEGIN %s-----\\n%s-----END %s-----' % (crypto_type, lines, crypto_type)", "def parse_certificate(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a certificate, but\n rather a private key\n '''\n ))\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a certificate, but\n rather a public key\n '''\n ))\n\n if key_type is None or key_type == 'certificate':\n try:\n return Certificate.load(data)\n except (ValueError):\n pass # Data was not a Certificate\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known certificate format\n '''\n ))", "def decrypt_using_private_key(message):\n public_key_path = os.path.join('keys', 'private.key')\n with open(public_key_path, 'rb') as file:\n private_key = RSA.importKey(file.read())\n\n cipher = PKCS1_OAEP.new(private_key)\n encrypted = cipher.decrypt(message)\n return encrypted.hex()", "def _unarmor(pem_bytes):\n\n if not isinstance(pem_bytes, byte_cls):\n raise TypeError(unwrap(\n '''\n pem_bytes must be a byte string, not %s\n ''',\n _type_name(pem_bytes)\n ))\n\n # Valid states include: \"trash\", \"headers\", \"body\"\n state = 'trash'\n headers = {}\n base64_data = b''\n object_type = None\n\n found_start = False\n found_end = False\n\n for line in pem_bytes.splitlines(False):\n if line == b'':\n continue\n\n if state == \"trash\":\n # Look for a starting line since some CA cert bundle show the cert\n # into in a parsed format above 
each PEM block\n type_name_match = re.match(b'^(?:---- |-----)BEGIN ([A-Z0-9 ]+)(?: ----|-----)', line)\n if not type_name_match:\n continue\n object_type = type_name_match.group(1).decode('ascii')\n\n found_start = True\n state = 'headers'\n continue\n\n if state == 'headers':\n if line.find(b':') == -1:\n state = 'body'\n else:\n decoded_line = line.decode('ascii')\n name, value = decoded_line.split(':', 1)\n headers[name] = value.strip()\n continue\n\n if state == 'body':\n if line[0:5] in (b'-----', b'---- '):\n der_bytes = base64.b64decode(base64_data)\n\n yield (object_type, headers, der_bytes)\n\n state = 'trash'\n headers = {}\n base64_data = b''\n object_type = None\n found_end = True\n continue\n\n base64_data += line\n\n if not found_start or not found_end:\n raise ValueError(unwrap(\n '''\n pem_bytes does not appear to contain PEM-encoded data - no\n BEGIN/END combination found\n '''\n ))", "def test_use_privatekey_file_unicode(self, tmpfile):\n self._use_privatekey_file_test(\n tmpfile.decode(getfilesystemencoding()) + NON_ASCII,\n FILETYPE_PEM,\n )", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def cert_to_pem(cert):\n return cert.public_bytes(Encoding.PEM)", "def _decode_public_key_identifier(identifier):\n\n return JWTAuth._get_identifier_cypher().decrypt(base64.b64decode(identifier)).decode('utf-8')", "def pem(ctx):\n click.echo(_get_pem(ctx().source))", "def from_pem(cls, data, password=None):\n p = cls()\n private_from_encoding(data, p, password)\n return p", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def deserializePrivateKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_private_key(string, password = None , backend = bc)", "def test_set_public_key_setter_pem_str(self) -> None:\n\n expected = self.pem_public_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_public_key(self.pem_public_key.decode())\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_public_key.public_bytes(\n serialization.Encoding.PEM, serialization.PublicFormat.PKCS1\n ).decode()\n\n self.assertEqual(expected, actual)", "def tearDown(self) -> None:\n\n del self.private_key\n del self.pem_private_key\n del self.pem_public_key\n del self.encrypted_pem_private_key", "def read_pem(input):\n data = []\n state = 0\n for line in input.split('\\n'):\n if state == 0:\n if line.startswith('-----BEGIN'):\n state = 1\n elif state == 1:\n if line.startswith('-----END'):\n state = 2\n else:\n data.append(line)\n elif state == 2:\n break\n if state != 2:\n raise ValueError, 'No PEM encoded input found'\n data = ''.join(data)\n data = data.decode('base64')\n return data", "def encode_key(self, key):\n return key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n ).decode(encoding='UTF-8')", "def load_cert(file):\n with open(file, \"r\") as pemfile:\n cert_content = pemfile.read()\n cert_stripped = \"\".join(\n [line for line in cert_content.splitlines() if \"CERTIFICATE\" not in line])\n\n logging.info('Loaded certificate from {}'.format(file))\n return cert_stripped", "def fix_key(public_key: str) -> str:\n\n if public_key.startswith(\"http://\") or public_key.startswith(\"https://\"):\n resp = requests.get(public_key)\n if public_key.endswith(\".json\"):\n key = resp.json()\n if \"keys\" in key:\n 
key = key[\"keys\"][0]\n jwk = jwcrypto.jwk.JWK.from_json(json.dumps(key))\n public_key = jwk.export_to_pem().decode(\"utf-8\")\n else:\n public_key = resp.content.decode(\"utf-8\")\n elif public_key.startswith(\"/\") or public_key.endswith((\".pem\")):\n with open(public_key, \"r\") as f:\n public_key = f.read()\n # ENV variables sometimes don't pass newlines, spec says white space\n # doesn't matter, but pyjwt cares about it, so fix it\n public_key = public_key.replace(\" PUBLIC \", \"_PLACEHOLDER_\")\n public_key = public_key.replace(\" \", \"\\n\")\n public_key = public_key.replace(\"_PLACEHOLDER_\", \" PUBLIC \")\n return public_key", "def clean_dict(data: dict) -> None:\n if not isinstance(data, dict):\n logger.warning(f\"Not a dictionary: {type(data)}\")\n return\n\n data.pop(\"_id\", None)\n data.pop(\"password\", None)\n\n return", "def aes_key_unwrap(self, kek: bytes, wrapped_key: bytes) -> bytes:\n return keywrap.aes_key_unwrap(kek, wrapped_key, default_backend())", "def decrypt_message(data,symetric_key,private_key):\n\tif type(data) == str or type(data) == bytes:\n\t\tdata = json.loads(data)\n\ttyp = data['type']\n\tnonce = data['nonce'].encode(\"iso-8859-1\")\n\tmessage = data['message'].encode(\"iso-8859-1\")\n\tnonce, *_ = decrypt(private_key,nonce)\n\tmessage = AESCCM(symetric_key).decrypt(nonce,message,None)\n\tmessage ={'type':typ,'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\treturn message", "def handle_pem_extension(oid, _input):\r\n try:\r\n cert = objects.X509(oid)\r\n cert.pem = _input.read()\r\n except (ValueError, TypeError, OSError) as failed_to_init:\r\n raise click.BadParameter(\r\n '[{0}]: File Content can\\'t be parsed or written.\\n {1}'.format(_input.name, _input.read())\r\n ) from failed_to_init", "def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def tls_certificate_private_key_pem_path(tls_certificate):\n with tls_certificate.private_key_pem.tempfile() as cert_key_pem:\n yield cert_key_pem", "def test_set_private_key_setter(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def test_get_private_key(self):\n\n expected = self.pem_private_key\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n actual = encryptor.get_private_key()\n\n self.assertEqual(expected, actual)", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. 
Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def parse_config(self, data):\n match = re.search(\"-----BEGIN RSA PRIVATE KEY-----.*\" + \\\n \"-----END RSA PRIVATE KEY-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Private key not found\")\n key = match.group()\n\n match = re.search(\"-----BEGIN CERTIFICATE-----.*\" + \\\n \"-----END CERTIFICATE-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Certificate not found\")\n cert = match.group()\n # config also contains allowed, dns, but we don't use that for GCMU\n return (cert, key)", "def rsa_decrypt(self, thing):\n return self.true_private_key.decrypt(\n thing,\n cryptography.hazmat.primitives.asymmetric.padding.OAEP(\n mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(\n algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def _get_pubkey_from_pem_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_pem_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def test_use_privatekey_file_bytes(self, tmpfile):\n self._use_privatekey_file_test(\n tmpfile + NON_ASCII.encode(getfilesystemencoding()),\n FILETYPE_PEM,\n )", "def get_pvk_pem_from_bytes(pvk: bytes) -> bytes:\n sk = ecdsa.SigningKey.from_string(pvk, curve=CURVE)\n\n return sk.to_pem()", "def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def load_private_key_pem(self, private_key_pem):\n return self.load_private_key(SigningKey.from_pem(private_key_pem))", "def resolve_password(obj, _):\n return obj.password.decode()", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def decrypted(data: str) -> str:\n\n return b64decode(data.encode('ascii')).decode('ascii')", "def _wrap_privatekey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PrivateKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize private key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_private_key(der, password=None,\n backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def solve(certificate_data: bytes) -> Certificate:\n return { # type: ignore\n Encoding.PEM: load_pem_x509_certificate,\n Encoding.DER: load_der_x509_certificate\n }[real_encoding](certificate_data, default_backend())", "def to_pem(self, encoding=\"pem\"):\n return public_to_pem(self, encoding)", "def _serialize_private_key(private_key, password=None):\n error = None\n pvt_key_loaders = [\n load_pem_private_key, load_der_private_key\n ]\n pvt_key = None\n for loader in pvt_key_loaders:\n if not pvt_key:\n try:\n pvt_key = loader(\n private_key.encode('utf-8'),\n password=password,\n backend=default_backend()\n )\n error = False\n break\n except (ValueError, UnsupportedAlgorithm) as err:\n error = err\n if error:\n raise errors.InvalidPrivateKeyError(error)\n else:\n return pvt_key", "def decrypt(data, key):\n data = six.ensure_binary(data)\n try:\n data = privy.peek(hidden=data, password=key)\n except ValueError:\n error = \"Unable to decrypt {cnt} bytes of data using key {k}, invalid key!\"\n error = error.format(cnt=len(data), k=key)\n raise exceptions.ModuleError(error)\n return six.ensure_text(data)", "def decrypt(self, cypher):\n\n cypher = b64decode(cypher)\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "def pfx2pem_memmory(input_file):\n pfx = open(input_file, 'rb').read()\n p12 = crypto.load_pkcs12(pfx)\n pem = crypto.dump_certificate(crypto.FILETYPE_PEM, p12.get_certificate())\n pem += crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())\n return pem", "def decrypt(ctx, input, output):\n gpg_key = _get_gpg_key(_get_pem(ctx().source), ctx().user, ctx().verbose)\n _run_gpg_with_key(gpg_key, [\n '--decrypt', '--recipient',\n ctx().user, '--trust-model', 'always', '--armor'\n ], input, output, ctx().verbose)", "def parse(self, s):\r\n\r\n bytes = dePem(s, \"CERTIFICATE\")\r\n self.parseBinary(bytes)\r\n return self", "def prepare_config(config: dict) -> dict:\n config.setdefault('password', None)\n config.setdefault('private_key', None)\n config.setdefault('private_key_pass', None)\n config.setdefault('to', None)\n\n return config", "def parse_key(key: RSA.RsaKey) -> str:\n\n return binascii.hexlify(key.exportKey(\n format='DER')).decode('ascii')", "def mk_keyobj_from_private_key_pem(self, pemdat_string):\n if isinstance(pemdat_string, str):\n pemdat_string = pemdat_string.encode()\n self.private_key_obj = serialization.load_pem_private_key(pemdat_string, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def 
text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def pem(b, name):\r\n s1 = b2a_base64(b)[:-1] # remove terminating \\n\r\n s2 = \"\"\r\n while s1:\r\n s2 += s1[:64] + \"\\n\"\r\n s1 = s1[64:]\r\n s = (\"-----BEGIN %s-----\\n\" % name) + s2 + \\\r\n (\"-----END %s-----\\n\" % name) \r\n return s", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def unwrap(self, key:bytes, wrapped_credential:bytes)->PublicKeyCredentialSource:\n unwrapped = keywrap.aes_key_unwrap_with_padding(key,wrapped_credential,default_backend())\n cred = PublicKeyCredentialSource()\n cred.from_bytes(unwrapped,True)\n cred.set_id(wrapped_credential)\n return cred", "def _scrub_auth_info(param_info, auth_param_name):\n info = param_info.copy()\n info[auth_param_name] = {key: '*' * len(str(value))\n for key, value in info[auth_param_name].items()}\n\n return info", "def base64_aes_decrypt(self,data,key):\n cipher = AES.new(key)\n try:\n return self._depkcs7padding(cipher.decrypt(base64.b64decode(data)))\n except Exception, ex:\n return ''", "def extract_ca_crt_bytes_from_pem(pem_content):\n begin_search = pem_content.find(constants.BEGIN_CERTIFICATE_MARKER)\n if begin_search < 0:\n raise exception.InvalidKubernetesCA\n\n end_search = pem_content.find(constants.END_CERTIFICATE_MARKER)\n if end_search < 0:\n raise exception.InvalidKubernetesCA\n\n end_search += len(constants.END_CERTIFICATE_MARKER)\n base64_crt = base64.encode_as_text(pem_content[begin_search:end_search])\n return base64_crt", "def test_use_privatekey_wrong_key(self, ctx_or_conn):\n key = PKey()\n key.generate_key(TYPE_RSA, 1024)\n ctx_or_conn.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n with pytest.raises(Error):\n ctx_or_conn.use_privatekey(key)", "def decode(self, crypto):", "def jwt_key_to_pem(self, key_json_dict):\n pub_key = RSAAlgorithm.from_jwk(json.dumps(key_json_dict))\n return pub_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo,\n )", "def get_pem():\n try:\n with open('encrypted_pem.txt', 'r') as encrypted_pem:\n pem_file = encrypted_pem.read()\n\n kms = boto3.client('kms', region_name=REGION)\n return kms.decrypt(CiphertextBlob=b64decode(pem_file))['Plaintext']\n except (IOError, ClientError, KeyError) as err:\n LOGGER.error(err)\n return False", "def test_private_key_pkey(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n key = crypto.load_privatekey(PEM, priv)\n self.assertEqual(utils.private_key_type(key), c.KEY_RSA)", "def decrypt(self, cypher):\n\n if self.crypt_private == \"\":\n raise ValueError(\"Error decrypting: No private encryption key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.crypt_private)\n 
return key_private.Decrypt(cypher)", "def decrypt_password(pass_to_decrypt):\n\n pass_to_decrypt = fk.decrypt(pass_to_decrypt)\n return pass_to_decrypt.decode()", "def unarmor(pem_bytes, multiple=False):\n\n generator = _unarmor(pem_bytes)\n\n if not multiple:\n return next(generator)\n\n return generator", "def to_pem(self, encoding=\"pem\"):\n return self.publicArea.to_pem(encoding)", "def parsePEMKey(s, private=False, public=False, passwordCallback=None,\r\n implementations=[\"openssl\", \"python\"]):\r\n for implementation in implementations:\r\n if implementation == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n key = OpenSSL_RSAKey.parse(s, passwordCallback)\r\n break\r\n elif implementation == \"python\":\r\n key = Python_RSAKey.parsePEM(s)\r\n break\r\n else:\r\n raise ValueError(\"No acceptable implementations\")\r\n\r\n return _parseKeyHelper(key, private, public)", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def serializePrivateKey(private_key):\n\treturn private_key.private_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PrivateFormat.PKCS8,\n\t\tencryption_algorithm=serialization.NoEncryption()\n\t)", "def del_key(self):\n # Deleting the values from the self.key and self.cryptor attributes.\n self.key=None\n self.cryptor=None", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def import_private_key_from_pem_file(filename, passphrase=None):\n with open(filename, \"rb\") as key_file:\n private_key = serialization.load_pem_private_key(\n key_file.read(), password=passphrase, backend=default_backend()\n )\n return private_key", "def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data" ]
[ "0.5717775", "0.5633551", "0.55747586", "0.55693495", "0.55062234", "0.5487441", "0.544285", "0.5425336", "0.5414394", "0.5376748", "0.53717756", "0.53678894", "0.53630817", "0.53443956", "0.5333764", "0.53271896", "0.532627", "0.5300595", "0.5287096", "0.5276774", "0.52395916", "0.5225118", "0.51815087", "0.5176023", "0.51483715", "0.51152956", "0.5108976", "0.51032734", "0.50908834", "0.5081816", "0.5074472", "0.50706124", "0.50697", "0.50569844", "0.50528055", "0.5046622", "0.5040904", "0.50319797", "0.5028584", "0.5019582", "0.5017938", "0.501657", "0.5008241", "0.49968478", "0.498696", "0.49791384", "0.49761975", "0.4964607", "0.4959414", "0.49551553", "0.49404424", "0.49331135", "0.49268854", "0.48806596", "0.4880401", "0.48802623", "0.4874796", "0.48650682", "0.48473933", "0.48462364", "0.4846056", "0.4845097", "0.48437798", "0.48322466", "0.48289442", "0.48276046", "0.48251268", "0.48055625", "0.4777817", "0.47776768", "0.47759917", "0.4767608", "0.4756752", "0.47545394", "0.4751102", "0.47490928", "0.47476497", "0.47471023", "0.47389838", "0.47352996", "0.4734411", "0.47318685", "0.4729502", "0.47243372", "0.47149876", "0.47104746", "0.47066778", "0.47027954", "0.47027776", "0.4695493", "0.4683696", "0.46735826", "0.46701887", "0.46587354", "0.46580118", "0.46556532", "0.46552145", "0.46533448", "0.4651788", "0.4646456" ]
0.5872964
0
Parses a PKCS1 private key, or encrypted private key
def _unarmor_pem_openssl_private(headers, data, password): enc_algo = None enc_iv_hex = None enc_iv = None if 'DEK-Info' in headers: params = headers['DEK-Info'] if params.find(',') != -1: enc_algo, enc_iv_hex = params.strip().split(',') else: enc_algo = 'RC4' if not enc_algo: return data if enc_iv_hex: enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii')) enc_algo = enc_algo.lower() enc_key_length = { 'aes-128-cbc': 16, 'aes-128': 16, 'aes-192-cbc': 24, 'aes-192': 24, 'aes-256-cbc': 32, 'aes-256': 32, 'rc4': 16, 'rc4-64': 8, 'rc4-40': 5, 'rc2-64-cbc': 8, 'rc2-40-cbc': 5, 'rc2-cbc': 16, 'rc2': 16, 'des-ede3-cbc': 24, 'des-ede3': 24, 'des3': 24, 'des-ede-cbc': 16, 'des-cbc': 8, 'des': 8, }[enc_algo] enc_key = hashlib.md5(password + enc_iv[0:8]).digest() while enc_key_length > len(enc_key): enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest() enc_key = enc_key[0:enc_key_length] enc_algo_name = { 'aes-128-cbc': 'aes', 'aes-128': 'aes', 'aes-192-cbc': 'aes', 'aes-192': 'aes', 'aes-256-cbc': 'aes', 'aes-256': 'aes', 'rc4': 'rc4', 'rc4-64': 'rc4', 'rc4-40': 'rc4', 'rc2-64-cbc': 'rc2', 'rc2-40-cbc': 'rc2', 'rc2-cbc': 'rc2', 'rc2': 'rc2', 'des-ede3-cbc': 'tripledes', 'des-ede3': 'tripledes', 'des3': 'tripledes', 'des-ede-cbc': 'tripledes', 'des-cbc': 'des', 'des': 'des', }[enc_algo] decrypt_func = crypto_funcs[enc_algo_name] if enc_algo_name == 'rc4': return decrypt_func(enc_key, data) return decrypt_func(enc_key, data, enc_iv)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data, password)\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a public key\n '''\n ))\n\n if key_type == 'certificate':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a certificate\n '''\n ))\n\n try:\n pki = PrivateKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PrivateKeyInfo\n\n try:\n parsed_wrapper = EncryptedPrivateKeyInfo.load(data)\n encryption_algorithm_info = parsed_wrapper['encryption_algorithm']\n encrypted_data = parsed_wrapper['encrypted_data'].native\n decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)\n pki = PrivateKeyInfo.load(decrypted_data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not EncryptedPrivateKeyInfo\n\n try:\n parsed = RSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPrivateKey\n\n try:\n parsed = DSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'dsa')\n except (ValueError):\n pass # Data was not a DSAPrivateKey\n\n try:\n parsed = ECPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'ec')\n except (ValueError):\n pass # Data was not an ECPrivateKey\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known private key format\n '''\n ))", "def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")", "def parsePEMKey(s, private=False, public=False, passwordCallback=None,\r\n implementations=[\"openssl\", \"python\"]):\r\n for implementation in implementations:\r\n if implementation == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n key = OpenSSL_RSAKey.parse(s, passwordCallback)\r\n break\r\n elif implementation == \"python\":\r\n key = Python_RSAKey.parsePEM(s)\r\n break\r\n else:\r\n raise ValueError(\"No acceptable implementations\")\r\n\r\n return _parseKeyHelper(key, private, public)", "def parse_config(self, data):\n match = re.search(\"-----BEGIN RSA PRIVATE KEY-----.*\" + \\\n \"-----END RSA PRIVATE KEY-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Private key not found\")\n key = match.group()\n\n match = re.search(\"-----BEGIN 
CERTIFICATE-----.*\" + \\\n \"-----END CERTIFICATE-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Certificate not found\")\n cert = match.group()\n # config also contains allowed, dns, but we don't use that for GCMU\n return (cert, key)", "def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid private key encoding\")\n\n return decoded_key[\"privateKey\"]", "def rsa_private_key_pkcs1_to_pkcs8(pkcs1_key):\n algorithm = RsaAlgorithmIdentifier()\n algorithm[\"rsaEncryption\"] = RSA_ENCRYPTION_ASN1_OID\n\n pkcs8_key = PKCS8PrivateKey()\n pkcs8_key[\"version\"] = 0\n pkcs8_key[\"privateKeyAlgorithm\"] = algorithm\n pkcs8_key[\"privateKey\"] = pkcs1_key\n\n return encoder.encode(pkcs8_key)", "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def test_private_key_pkey(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n key = crypto.load_privatekey(PEM, priv)\n self.assertEqual(utils.private_key_type(key), c.KEY_RSA)", "def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].parsed\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': Integer(pow(\n params['g'].native,\n parsed.native,\n params['p'].native\n )),\n 'private_key': parsed,\n })\n\n if key_alg == 'ec':\n parsed = key_info['private_key'].parsed\n parsed['parameters'] = key_info['private_key_algorithm']['parameters']\n return parsed\n\n raise ValueError('Unsupported key_info.algorithm \"%s\"' % key_info.algorithm)", "def parse_key(raw_key):\n raw_key_bytes = raw_key.encode('ascii')\n try:\n validate_cmek(raw_key)\n key_type = KeyType.CMEK\n sha256 = None\n except errors.Error:\n if len(raw_key) != 44:\n raise\n key_type = KeyType.CSEK\n sha256 = hash_util.get_base64_hash_digest_string(\n hashlib.sha256(base64.b64decode(raw_key_bytes)))\n return EncryptionKey(key=raw_key, sha256=sha256, type=key_type)", "def test_private_key_rsa(self):\n priv = \"\"\"-----BEGIN PRIVATE 
KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_RSA)", "def read_chain_pair(private_key, certificates):\n with open(private_key, 'rb') as f:\n private_key = f.read()\n with open(certificates, 'rb') as f:\n certificates = f.read()\n return (private_key, certificates)", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def _parse_pkcs12(data, password, load_private_key):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n certs = {}\n private_keys = {}\n\n pfx = Pfx.load(data)\n\n auth_safe = pfx['auth_safe']\n if auth_safe['content_type'].native != 'data':\n raise ValueError(pretty_message(\n '''\n Only password-protected PKCS12 files are currently supported\n '''\n ))\n authenticated_safe = pfx.authenticated_safe\n\n mac_data = pfx['mac_data']\n if mac_data:\n mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native\n key_length = {\n 'sha1': 20,\n 'sha224': 28,\n 'sha256': 32,\n 'sha384': 48,\n 'sha512': 64,\n 'sha512_224': 28,\n 'sha512_256': 32,\n }[mac_algo]\n mac_key = pkcs12_kdf(\n mac_algo,\n password,\n mac_data['mac_salt'].native,\n mac_data['iterations'].native,\n key_length,\n 3 # ID 3 is for generating an HMAC key\n )\n hash_mod = getattr(hashlib, mac_algo)\n computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest()\n stored_hmac = mac_data['mac']['digest'].native\n if not constant_compare(computed_hmac, stored_hmac):\n raise ValueError('Password provided is invalid')\n\n for content_info in authenticated_safe:\n content = content_info['content']\n\n if isinstance(content, OctetString):\n _parse_safe_contents(content.native, certs, private_keys, password, load_private_key)\n\n elif isinstance(content, EncryptedData):\n encrypted_content_info = content['encrypted_content_info']\n\n encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm']\n encrypted_content = encrypted_content_info['encrypted_content'].native\n decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, 
encrypted_content, password)\n\n _parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key)\n\n else:\n raise ValueError(pretty_message(\n '''\n Public-key-based PKCS12 files are not currently supported\n '''\n ))\n\n key_fingerprints = set(private_keys.keys())\n cert_fingerprints = set(certs.keys())\n\n common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))\n\n key = None\n cert = None\n other_certs = []\n\n if len(common_fingerprints) >= 1:\n fingerprint = common_fingerprints[0]\n key = private_keys[fingerprint]\n cert = certs[fingerprint]\n other_certs = [certs[f] for f in certs if f != fingerprint]\n return (key, cert, other_certs)\n\n if len(private_keys) > 0:\n first_key = sorted(list(private_keys.keys()))[0]\n key = private_keys[first_key]\n\n if len(certs) > 0:\n first_key = sorted(list(certs.keys()))[0]\n cert = certs[first_key]\n del certs[first_key]\n\n if len(certs) > 0:\n other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly)\n\n return (key, cert, other_certs)", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def rsa_string_to_privatekey(mystr):\r\n if len(mystr.split()) != 3:\r\n raise ValueError, \"Invalid private key string\"\r\n \r\n return {'d':long(mystr.split()[0]), 'p':long(mystr.split()[1]), 'q':long(mystr.split()[2])}", "def load_private_key(self, private_key):\n if not self.curve:\n self.curve = private_key.curve\n if self.curve != private_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.private_key = private_key\n return self.private_key.get_verifying_key()", "def rsa_public_key_pkcs1_to_pkcs8(pkcs1_key):\n algorithm = RsaAlgorithmIdentifier()\n algorithm[\"rsaEncryption\"] = RSA_ENCRYPTION_ASN1_OID\n\n pkcs8_key = PublicKeyInfo()\n pkcs8_key[\"algorithm\"] = algorithm\n pkcs8_key[\"publicKey\"] = univ.BitString.fromOctetString(pkcs1_key)\n\n return encoder.encode(pkcs8_key)", "def decrypt(s):\n if s is None:\n return None\n else:\n # try:\n enc_value = ast.literal_eval(s)\n private_key = serialization.load_pem_private_key(\n pkey.encode('utf-8'),\n password=None,\n backend=default_backend()\n )\n\n dec = private_key.decrypt(\n enc_value,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return dec.decode()", "def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def deserializePrivateKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_private_key(string, password = None , backend = bc)", "def test_get_private_key(self):\n\n expected = self.pem_private_key\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n actual = encryptor.get_private_key()\n\n self.assertEqual(expected, actual)", "def test_rsa_key(self):\n key1 = generate_private_key(u'rsa')\n self.assertIsInstance(key1,rsa.RSAPrivateKey)\n key2 = generate_private_key(u'rsa')\n self.assertIsInstance(key2, rsa.RSAPrivateKey)\n self.assertNotEqual(\n key1.public_key().public_numbers(),\n key2.public_key().public_numbers()\n )", "def parse_public(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte 
string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, algo, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a public key or\n certificate, but rather a private key\n '''\n ))\n\n # When a public key returning from _unarmor_pem has a known algorithm\n # of RSA, that means the DER structure is of the type RSAPublicKey, so\n # we need to wrap it in the PublicKeyInfo structure.\n if algo == 'rsa':\n return PublicKeyInfo.wrap(data, 'rsa')\n\n if key_type is None or key_type == 'public key':\n try:\n pki = PublicKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PublicKeyInfo\n\n try:\n rpk = RSAPublicKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n rpk.native\n return PublicKeyInfo.wrap(rpk, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPublicKey\n\n if key_type is None or key_type == 'certificate':\n try:\n parsed_cert = Certificate.load(data)\n key_info = parsed_cert['tbs_certificate']['subject_public_key_info']\n return key_info\n except (ValueError):\n pass # Data was not a cert\n\n raise ValueError('The data specified does not appear to be a known public key or certificate format')", "def validate_privatekey_pem(key_pem):\n assert isinstance(key_pem, str)\n\n private_key_cryptography = serialization.load_pem_private_key(\n data=key_pem.encode('ascii'),\n password=None,\n backend=cryptography_default_backend\n )\n\n if not isinstance(private_key_cryptography, rsa.RSAPrivateKey):\n sys.exit('Unexpected private key type')\n\n return private_key_cryptography", "def rsa_file_to_privatekey(filename):\r\n fileobject = file(filename,'r')\r\n privatekeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_privatekey(privatekeystring)", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def load_private_key_bytes(self, private_key):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key load.\")\n return self.load_private_key(\n SigningKey.from_string(private_key, curve=self.curve))", "def rsa_pkcs1v15_decrypt(self, data):\n pass", "def test_private_key_ec(self):\n priv = \"\"\"-----BEGIN EC PARAMETERS-----\nBggqhkjOPQMBBw==\n-----END EC PARAMETERS-----\n-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIJZ57L6f6ywtZa7VhsvthAShxjdrL9EIrVwVgxnmD5b3oAoGCCqGSM49\nAwEHoUQDQgAEIg6eBOPv5M2z4ANtsJukbimKWX04lanEdALsbu2xNCDBXJ0IJ4Sd\n3u4G1qvrKX0mBHd7yUPGui+7bvp084mNag==\n-----END EC PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBiTCCAS+gAwIBAgIJAINtiwRC4eBJMAoGCCqGSM49BAMCMCExDzANBgNVBAMM\nBkVDIDI1NjEOMAwGA1UECgwFV2ViQ0EwHhcNMTgwNTI3MTAyNTIyWhcNMTgwNjI2\nMTAyNTIyWjAhMQ8wDQYDVQQDDAZFQyAyNTYxDjAMBgNVBAoMBVdlYkNBMFkwEwYH\nKoZIzj0CAQYIKoZIzj0DAQcDQgAEIg6eBOPv5M2z4ANtsJukbimKWX04lanEdALs\nbu2xNCDBXJ0IJ4Sd3u4G1qvrKX0mBHd7yUPGui+7bvp084mNaqNQME4wHQYDVR0O\nBBYEFEmE51rEUz4TuD8oEAw2lvMfvi6LMB8GA1UdIwQYMBaAFEmE51rEUz4TuD8o\nEAw2lvMfvi6LMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgfiKDoHB3\nWzRO1juSMyVBuBw2p1o0ab+3fBNDvff8PXcCIQCUKIyzTnM7Wz6TkABfqOcmx7n4\nsbRvdOg3CepLGW3Ytw==\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), 
c.KEY_EC)", "def _InitFromString(self, text):\r\n # First, remove all whitespace:\r\n text = re.sub(_WHITESPACE_RE, '', text)\r\n\r\n # Parse out the period-separated components\r\n match = _KEY_RE.match(text)\r\n if not match:\r\n raise ValueError('Badly formatted key string: \"%s\"', text)\r\n\r\n private_exp = match.group('private_exp')\r\n if private_exp:\r\n private_exp = _B64ToNum(private_exp)\r\n else:\r\n private_exp = None\r\n self.keypair = Crypto.PublicKey.RSA.construct(\r\n (_B64ToNum(match.group('mod')),\r\n _B64ToNum(match.group('exp')),\r\n private_exp))", "def _serialize_private_key(private_key, password=None):\n error = None\n pvt_key_loaders = [\n load_pem_private_key, load_der_private_key\n ]\n pvt_key = None\n for loader in pvt_key_loaders:\n if not pvt_key:\n try:\n pvt_key = loader(\n private_key.encode('utf-8'),\n password=password,\n backend=default_backend()\n )\n error = False\n break\n except (ValueError, UnsupportedAlgorithm) as err:\n error = err\n if error:\n raise errors.InvalidPrivateKeyError(error)\n else:\n return pvt_key", "def mk_keyobj_from_private_key_pem(self, pemdat_string):\n if isinstance(pemdat_string, str):\n pemdat_string = pemdat_string.encode()\n self.private_key_obj = serialization.load_pem_private_key(pemdat_string, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def private_key(self):\n if self._private_key is not None:\n return self._private_key[0]\n\n spk = self.serialized_private_key\n passphrase = self.passphrase\n\n try:\n self._private_key = [\n serialization.load_pem_private_key(\n self.serialized_private_key,\n backend=default_backend(),\n password=self.passphrase)]\n\n return self._private_key[0]\n\n except:\n raise\n self._private_key = [None]\n return self._private_key[0]", "def decrypt_using_private_key(message):\n public_key_path = os.path.join('keys', 'private.key')\n with open(public_key_path, 'rb') as file:\n private_key = RSA.importKey(file.read())\n\n cipher = PKCS1_OAEP.new(private_key)\n encrypted = cipher.decrypt(message)\n return encrypted.hex()", "def test_set_private_key(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption(private_key=self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def _unarmor_pem(data, password=None):\n\n object_type, headers, der_bytes = unarmor(data)\n\n type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n armor_type = re.match(type_regex, object_type)\n if not armor_type:\n raise ValueError(pretty_message(\n '''\n data does not seem to contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n pem_header = armor_type.group(1)\n\n data = data.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n algo = armor_type.group(2).lower()\n return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))\n\n key_type = pem_header.lower()\n algo = None\n if key_type == 'encrypted private key':\n key_type = 'private key'\n elif key_type == 'rsa public 
key':\n key_type = 'public key'\n algo = 'rsa'\n\n return (key_type, algo, der_bytes)", "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def get_rsa_key_pair() -> Tuple[str, Optional[str]]:\n if RSA_PUBLIC_KEY_PATH is not None:\n # Read public key.\n with open(RSA_PUBLIC_KEY_PATH) as f_obj:\n public_key = f_obj.read()\n\n # Read private key if given.\n private_key = None\n if RSA_PRIVATE_KEY_PATH is not None:\n with open(RSA_PRIVATE_KEY_PATH) as f_obj:\n private_key = f_obj.read()\n\n return (public_key, private_key)\n\n if RSA_PUBLIC_KEY is not None:\n return (RSA_PUBLIC_KEY, RSA_PRIVATE_KEY)\n\n return create_rsa_key_pair()", "def _get_pubkey_from_pem_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_pem_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def public_from_private(self, private_key):", "def parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)", "def load_private(file):\n with open(file, \"rb\") as pemfile:\n key = jwk.JWK.from_pem(pemfile.read())\n\n logging.info('Loaded private key from {}'.format(file))\n return key", "def read_from(cls, s, taproot: bool = False):\n first = s.read(1)\n origin = None\n if first == b\"[\":\n prefix, 
char = read_until(s, b\"]\")\n if char != b\"]\":\n raise ArgumentError(\"Invalid key - missing ]\")\n origin = KeyOrigin.from_string(prefix.decode())\n else:\n s.seek(-1, 1)\n k, char = read_until(s, b\",)/\")\n der = b\"\"\n # there is a following derivation\n if char == b\"/\":\n der, char = read_until(s, b\"<{,)\")\n # legacy branches: {a,b,c...}\n if char == b\"{\":\n der += b\"{\"\n branch, char = read_until(s, b\"}\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing }\")\n der += branch + b\"}\"\n rest, char = read_until(s, b\",)\")\n der += rest\n # multipart descriptor: <a;b;c;...>\n elif char == b\"<\":\n der += b\"<\"\n branch, char = read_until(s, b\">\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing >\")\n der += branch + b\">\"\n rest, char = read_until(s, b\",)\")\n der += rest\n if char is not None:\n s.seek(-1, 1)\n # parse key\n k, xonly_repr = cls.parse_key(k, taproot)\n # parse derivation\n allow_hardened = isinstance(k, bip32.HDKey) and isinstance(k.key, ec.PrivateKey)\n derivation = AllowedDerivation.from_string(\n der.decode(), allow_hardened=allow_hardened\n )\n return cls(k, origin, derivation, taproot, xonly_repr)", "def rsa_decrypt(self, thing):\n return self.true_private_key.decrypt(\n thing,\n cryptography.hazmat.primitives.asymmetric.padding.OAEP(\n mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(\n algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def parse_dict(self, data):\n self.public_key = VerifyingKey(data['public_key'])\n self.signature = base58.b58decode(data['signature']) if data['signature'] else None", "def test_create_and_import_encrypted_rsa(self):\n name = \"key_encrypted\"\n password = \"123456\"\n bits= 3072\n generate_and_write_rsa_keypair(name, bits, password)\n private_key = import_rsa_key_from_file(name, password)\n public_key = import_rsa_key_from_file(name + \".pub\")\n\n securesystemslib.formats.KEY_SCHEMA.check_match(private_key)\n self.assertTrue(private_key[\"keyval\"].get(\"private\"))\n self.assertTrue(\n securesystemslib.formats.PUBLIC_KEY_SCHEMA.matches(public_key))", "def get_key_pair_from_pvk_pem_file(fpath: str) -> typing.Tuple[bytes, bytes]:\n pvk = _get_bytes_from_pem_file(fpath).decode(\"UTF-8\")\n sk = ecdsa.SigningKey.from_pem(pvk)\n\n return _get_key_pair_from_sk(sk)", "def create_rsa_key_pair() -> Tuple[str, str]:\n key = RSA.generate(RSA_KEY_STRENGTH)\n public_key = key.publickey().export_key().decode()\n private_key = key.export_key().decode()\n return public_key, private_key", "def __init__(self, private_key):\n if private_key:\n if isinstance(private_key, str): # base58 encoded string\n self.private_key = PrivateKey.from_b58check(private_key)\n else:\n self.private_key = private_key\n self.public_key = self.private_key.public_key\n else:\n self.private_key = None\n self.public_key = None", "def parse_key(key: RSA.RsaKey) -> str:\n\n return binascii.hexlify(key.exportKey(\n format='DER')).decode('ascii')", "def rsa_public_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PublicKeyInfo())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid public key encoding.\")\n\n return decoded_key[\"publicKey\"].asOctets()", "def extract_ca_private_key_bytes_from_pem(pem_content):\n found_marker = False\n for begin_marker in [constants.BEGIN_PRIVATE_KEY_MARKER,\n constants.BEGIN_RSA_PRIVATE_KEY_MARKER]:\n begin_search = pem_content.find(begin_marker)\n if 
begin_search >= 0:\n found_marker = True\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n found_marker = False\n for end_marker in [constants.END_PRIVATE_KEY_MARKER,\n constants.END_RSA_PRIVATE_KEY_MARKER]:\n end_search = pem_content.find(end_marker)\n if end_search >= 0:\n found_marker = True\n end_search += len(end_marker)\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n base64_key = base64.encode_as_text(pem_content[begin_search:end_search])\n return base64_key", "def mk_keyobj_from_private_key(self, privkey):\n bn = BACKEND_KP.private_key_obj._backend._ffi.NULL\n bn_ptr = BACKEND_KP.private_key_obj._backend._lib.BN_bin2bn(privkey, len(privkey), bn)\n secret_val = BACKEND_KP.private_key_obj._backend._bn_to_int(bn_ptr)\n\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256K1(), default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def private_key(self):", "def read_keypair(priv_key_file, public_key_file):\n key_pair = {}\n with open(priv_key_file) as f:\n key_data = f.read()\n f.close()\n key_pair[\"key\"] = key_data\n with open(public_key_file) as f:\n pub_data = f.read()\n f.close()\n key_pair[\"pub\"] = pub_data\n for i in [priv_key_file, public_key_file]:\n os.remove(i)\n return key_pair", "def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def check_equal_rsa_priv_key(sk2_priv, sk_priv):\n pri_n = sk_priv.private_numbers()\n pri_n2 = sk2_priv.private_numbers()\n\n # the library guarantees this: p is the larger factor\n self.assertTrue(pri_n.p > pri_n.q)\n\n self.assertTrue(\n pri_n2.p == pri_n.p and\n pri_n2.q == pri_n.q and\n pri_n2.d == pri_n.d and\n pri_n2.dmp1 == pri_n.dmp1 and\n pri_n2.dmq1 == pri_n.dmq1 and\n pri_n2.iqmp == pri_n.iqmp)", "def parse_key(self, key):\r\n if not key:\r\n self.aes = None # empty key == no encryption\r\n return self.parse_string(self.tmp) # must return size (see the next return)\r\n key.decode() # test availability\r\n size = len(key)\r\n for padding in (16, 24, 32): # fixed key size\r\n if size <= padding:\r\n break\r\n key += chr(0) * (padding - size)\r\n self.aes = AES.new(key)\r\n return self.parse_string(self.tmp) # if key changes you must update string\r", "def _rsa_key(self,private_key):\n numbers = private_key.private_numbers()\n content = WriteMessage()\n content.write_string('ssh-rsa')\n content.write_mpint(numbers.public_numbers.n)\n content.write_mpint(numbers.public_numbers.e)\n content.write_mpint(numbers.d)\n content.write_mpint(numbers.iqmp)\n content.write_mpint(numbers.p)\n content.write_mpint(numbers.q)\n return content.data", "def parse_certificate(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a 
certificate, but\n rather a private key\n '''\n ))\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a certificate, but\n rather a public key\n '''\n ))\n\n if key_type is None or key_type == 'certificate':\n try:\n return Certificate.load(data)\n except (ValueError):\n pass # Data was not a Certificate\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known certificate format\n '''\n ))", "def check_equal_rsa_pub_key(sk2_, sk_):\n pub_n = sk_.public_numbers()\n pub_n2 = sk2_.public_numbers()\n\n self.assertEqual(pub_n2.e, pub_n.e)\n self.assertEqual(pub_n2.n, pub_n.n)", "def test_public_key_rsa(self):\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n x509 = crypto.load_certificate(PEM, cert)\n self.assertEqual(utils.public_key_type(x509), c.KEY_RSA)", "def rsa_is_valid_privatekey(key):\r\n # must be a dict\r\n if type(key) is not dict:\r\n return False\r\n\r\n # missing the right keys\r\n if 'd' not in key or 'p' not in key or 'q' not in key:\r\n return False\r\n\r\n # has extra data in the key\r\n if len(key) != 3:\r\n return False\r\n\r\n for item in ['d', 'p', 'q']:\r\n # must have integer or long types for the key components...\r\n if type(key[item]) is not int and type(key[item]) is not long:\r\n return False\r\n\r\n if number_isPrime(key['p']) and number_isPrime(key['q']):\r\n # Seems valid...\r\n return True\r\n else:\r\n return False", "def checkKeyFile(file : str, typ : str) -> bool:\n return True\n with open(file, \"r\") as file:\n first_line = file.readline()\n for last_line in file:\n pass\n \n if typ == \"private\" :\n if(first_line == \"---begin monRSA private key---\\n\"):\n if(last_line == \"---end monRSA key---\"):\n return True\n return False\n elif typ == \"public\" :\n if(first_line == \"---begin monRSA public key---\\n\"):\n if(last_line == \"---end monRSA key---\"):\n return True\n return False\n else :\n print(\"wrong type\")\n return False", "def _try_load_ca_private_key(path):\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, open(path, 'rb').read())\n if pkey.bits() < 2048:\n raise ValueError(\"I'm sorry Dave, I can't let you use a small \"\n \"RSA key.\")\n pkey.check()\n return pkey", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {'privateExponent': util.Decode(rsa['privateExponent']),\n 'primeP': util.Decode(rsa['primeP']),\n 'primeQ': util.Decode(rsa['primeQ']),\n 'primeExponentP': util.Decode(rsa['primeExponentP']),\n 'primeExponentQ': util.Decode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Decode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return 
RsaPrivateKey(params, pub, key, rsa['size'])", "def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())", "def __decryptRSA(msg, user):\n # Load user's private key\n try:\n with open(\"%s/%s/keys/privateKey.pem\" % (USERS, user), \"rb\") as f:\n privateKey = serialization.load_pem_private_key(\n f.read(),\n password=None,\n backend=default_backend()\n )\n f.close()\n except:\n print(\"Error opening user's private key\")\n print(sys.exc_info())\n return None\n \n # Decrypt message\n return privateKey.decrypt(\n msg, \n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def _create_rsa_key_pair(self, length, public_exponent=65537):\n self.logger.info(\n \"Generating an RSA key pair with length: {0}, and \"\n \"public_exponent: {1}\".format(\n length, public_exponent\n )\n )\n try:\n private_key = rsa.generate_private_key(\n public_exponent=public_exponent,\n key_size=length,\n backend=default_backend())\n public_key = private_key.public_key()\n\n private_bytes = private_key.private_bytes(\n serialization.Encoding.DER,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption())\n public_bytes = public_key.public_bytes(\n serialization.Encoding.DER,\n serialization.PublicFormat.PKCS1)\n except Exception as e:\n self.logger.exception(e)\n raise exceptions.CryptographicFailure(\n \"An error occurred while generating the RSA key pair. \"\n \"See the server log for more information.\"\n )\n\n public_key = {\n 'value': public_bytes,\n 'format': enums.KeyFormatType.PKCS_1,\n 'public_exponent': public_exponent\n }\n private_key = {\n 'value': private_bytes,\n 'format': enums.KeyFormatType.PKCS_8,\n 'public_exponent': public_exponent\n }\n\n return public_key, private_key", "def get_private_key(self, address58: str) -> 'EllipticCurvePrivateKey':\n return self.keys[address58]", "def rsa_decrypt(data, rsa_priv_key_str):\r\n key = RSA.importKey(rsa_priv_key_str)\r\n cipher = PKCS1_OAEP.new(key)\r\n return cipher.decrypt(data)", "def _get_private_key(self, privkey=None):\n\n # read private keys from keyring\n privkeys = self.gpg.list_keys(True) # True => private keys\n if len(privkeys) > 0 and privkeys[-1].has_key('fingerprint'):\n fingerprints = []\n for k in privkeys:\n fingerprints.append(k['fingerprint'])\n else:\n # no private key in keyring\n return None\n\n if privkey:\n # check for existence of private key received as argument\n # DEVEL: check for expiration as well\n if len(privkey) > 7 and len(privkey) <= 40:\n for fp in fingerprints:\n if fp.endswith(privkey):\n # work with last 16 significant chars internally,\n # even if only 8 are required in trac.ini\n privkey = fp[-16:]\n break\n # no fingerprint matching key ID\n else:\n privkey = None\n else:\n # reset invalid key ID\n privkey = None\n else:\n # select (last) private key from keyring\n privkey = fingerprints[-1][-16:]\n\n return privkey", "def _load_private_key(self, filename, keytype=None):\n type_map = {\n 'dsa': ssh.DSSKey,\n 'rsa': ssh.RSAKey}\n\n if keytype is None:\n with open(filename, 'rb') as k:\n keydata = k.read()\n \n m = re.search(\"BEGIN (.*?) 
PRIVATE KEY\", keydata)\n if m:\n keytype = m.group(1)\n\n keycls = type_map.get(keytype.lower(), 'dsa')\n\n try:\n key = keycls.from_private_key_file(filename)\n log.debug(\"Loaded key '%s' without password.\", filename)\n except ssh.PasswordRequiredException:\n passphrase = self.config.get('passphrase')\n \n if callable(passphrase):\n passphrase = passphrase(filename,\n self.config.get('remote_host', 'localhost'),\n self.config.get('username', getpass.getuser()))\n if passphrase is None:\n return\n\n if not passphrase:\n passphrase = getpass.getpass(\"Key passphrase: \")\n \n key = keycls.from_private_key_file(filename, passphrase)\n\n return key", "def get_key_from_keyring(self):\n private_key = keyring.get_password(self.keyring_service_name, \"private_key\")\n\n if private_key is not None:\n return base64.b64decode(private_key)\n else:\n return None", "def test_encryption_with_pkcs1v15(self) -> None:\n\n given = \"Hello, World!\"\n expected = b\"Hello, World!\"\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key)\n encryptor.set_public_key(self.pem_public_key)\n\n actual = encryptor.encrypt_data(given, padd=\"PKCS1v15\")\n\n self.assertNotEqual(given, actual)\n\n actual = encryptor.decrypt_data(actual, padd=\"PKCS1v15\")\n\n self.assertEqual(expected, actual)", "def test_set_private_key_setter_pem_str(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "async def retrieve_private_key(self) -> Tuple[str, str]:\n\n filename, file_path = random.choice(self._private_keys)\n async with aiofiles.open(file_path, mode='r') as file:\n private_key = await file.read()\n return private_key, self._create_public_key_identifier(filename)", "def parseBinary(self, bytes):\r\n\r\n self.bytes = bytearray(bytes)\r\n p = ASN1Parser(bytes)\r\n\r\n #Get the tbsCertificate\r\n tbsCertificateP = p.getChild(0)\r\n\r\n #Is the optional version field present?\r\n #This determines which index the key is at.\r\n if tbsCertificateP.value[0]==0xA0:\r\n subjectPublicKeyInfoIndex = 6\r\n else:\r\n subjectPublicKeyInfoIndex = 5\r\n\r\n #Get the subject\r\n self.subject = tbsCertificateP.getChildBytes(\\\r\n subjectPublicKeyInfoIndex - 1)\r\n\r\n #Get the subjectPublicKeyInfo\r\n subjectPublicKeyInfoP = tbsCertificateP.getChild(\\\r\n subjectPublicKeyInfoIndex)\r\n\r\n #Get the algorithm\r\n algorithmP = subjectPublicKeyInfoP.getChild(0)\r\n rsaOID = algorithmP.value\r\n if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:\r\n raise SyntaxError(\"Unrecognized AlgorithmIdentifier\")\r\n\r\n #Get the subjectPublicKey\r\n subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1)\r\n\r\n #Adjust for BIT STRING encapsulation\r\n if (subjectPublicKeyP.value[0] !=0):\r\n raise SyntaxError()\r\n subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:])\r\n\r\n #Get the modulus and exponent\r\n modulusP = subjectPublicKeyP.getChild(0)\r\n publicExponentP = subjectPublicKeyP.getChild(1)\r\n\r\n #Decode them into numbers\r\n n = bytesToNumber(modulusP.value)\r\n e = bytesToNumber(publicExponentP.value)\r\n\r\n #Create a public key instance\r\n self.publicKey = _createPublicRSAKey(n, e)", "def private_key_from_nep2(nep2_key: str, passphrase: str,\n 
scrypt_parameters: Optional[wallet.ScryptParameters] = None) -> bytes:\n if scrypt_parameters is None:\n scrypt_parameters = wallet.ScryptParameters()\n\n if len(nep2_key) != 58:\n raise ValueError(f\"Please provide a nep2_key with a length of 58 bytes (LEN: {len(nep2_key)})\")\n\n address_hash_size = 4\n address_hash_offset = len(NEP_FLAG) + len(NEP_HEADER)\n\n try:\n decoded_key = base58.b58decode_check(nep2_key)\n except Exception:\n raise ValueError(\"Base58decode failure of nep2 key\")\n\n address_checksum = decoded_key[address_hash_offset:address_hash_offset + address_hash_size]\n encrypted = decoded_key[-32:]\n\n pwd_normalized = bytes(unicodedata.normalize(\"NFC\", passphrase), \"utf-8\")\n derived = hashlib.scrypt(password=pwd_normalized, salt=address_checksum,\n n=scrypt_parameters.n,\n r=scrypt_parameters.r,\n p=scrypt_parameters.p,\n dklen=64)\n\n derived1 = derived[:32]\n derived2 = derived[32:]\n\n cipher = AES.new(derived2, AES.MODE_ECB)\n decrypted = cipher.decrypt(encrypted)\n private_key = Account._xor_bytes(decrypted, derived1)\n\n # Now check that the address hashes match. If they don't, the password was wrong.\n key_pair = cryptography.KeyPair(private_key=private_key)\n script_hash = to_script_hash(contracts.Contract.create_signature_redeemscript(key_pair.public_key))\n address = Account.script_hash_to_address(script_hash)\n first_hash = hashlib.sha256(address.encode(\"utf-8\")).digest()\n second_hash = hashlib.sha256(first_hash).digest()\n checksum = second_hash[:4]\n if checksum != address_checksum:\n raise ValueError(f\"Wrong passphrase or key was encrypted with an address version that is not \"\n f\"{settings.network.account_version}\")\n\n return private_key", "def load_private_key_pem(self, private_key_pem):\n return self.load_private_key(SigningKey.from_pem(private_key_pem))", "def _wrap_privatekey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PrivateKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize private key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_private_key(der, password=None,\n backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def baca_kunci_rsa():\n filename = ambil_file(['key'])\n if filename.endswith('.key'):\n with open(filename,\"rb\") as f:\n kunci = f.readlines()\n return kunci\n else:\n return False", "def test_set_private_key_setter_encrypted_pem(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(\n self.encrypted_pem_private_key, password=self.private_key_password\n )\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def get_key_pair_from_pvk_b64(pvk_b64: str):\n pvk = base64.b64decode(pvk_b64)\n sk = ecdsa.SigningKey.from_string(pvk, curve=CURVE)\n\n return _get_key_pair_from_sk(sk)", "def test_set_private_key_setter(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, 
actual)", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def ReadKey(type, key):\n try:\n return {keyinfo.AES: AesKey.Read,\n keyinfo.HMAC_SHA1: HmacKey.Read,\n keyinfo.DSA_PRIV: DsaPrivateKey.Read,\n keyinfo.RSA_PRIV: RsaPrivateKey.Read,\n keyinfo.DSA_PUB: DsaPublicKey.Read,\n keyinfo.RSA_PUB: RsaPublicKey.Read}[type](key)\n except KeyError:\n raise errors.KeyczarError(\"Unsupported key type: %s\" % type)", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key", "def load_x509_cert(url, httpc, spec2key, **get_args):\n try:\n r = httpc(\"GET\", url, allow_redirects=True, **get_args)\n if r.status_code == 200:\n cert = str(r.text)\n try:\n public_key = spec2key[cert] # If I've already seen it\n except KeyError:\n public_key = import_public_key_from_pem_data(cert)\n spec2key[cert] = public_key\n\n if isinstance(public_key, rsa.RSAPublicKey):\n return {\"rsa\": public_key}\n elif isinstance(public_key, ec.EllipticCurvePublicKey):\n return {\"ec\": public_key}\n else:\n raise Exception(\"HTTP Get error: %s\" % r.status_code)\n except Exception as err: # not a RSA key\n logger.warning(\"Can't load key: %s\" % err)\n return []", "def Read(key):\n rsa = json.loads(key)\n pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))\n params = {\n 'privateExponent': util.Base64WSDecode(rsa['privateExponent']),\n 'primeP': util.Base64WSDecode(rsa['primeP']),\n 'primeQ': util.Base64WSDecode(rsa['primeQ']),\n 'primeExponentP': util.Base64WSDecode(rsa['primeExponentP']),\n 'primeExponentQ': util.Base64WSDecode(rsa['primeExponentQ']),\n 'crtCoefficient': util.Base64WSDecode(rsa['crtCoefficient'])\n }\n\n key = RSA.construct((util.BytesToLong(pub.params['modulus']),\n util.BytesToLong(pub.params['publicExponent']),\n util.BytesToLong(params['privateExponent']),\n util.BytesToLong(params['primeQ']),\n util.BytesToLong(params['primeP']),\n util.BytesToLong(params['crtCoefficient'])))\n return RsaPrivateKey(params, pub, key, rsa['size'])", "def _get_key_pair_from_sk(sk: ecdsa.SigningKey) -> typing.Tuple[bytes, bytes]:\n return sk.to_string(), \\\n sk.verifying_key.to_string(\"compressed\")", "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def gk_handshake_1_2_aes( self , packet ):\n\t\ttry:\n\n\t\t\t# Decapsulate the TKIP packet, and rebuild the plaintext 
packet.\n\t\t\tplaintext\t= self.handleAES.decapsulate( packet , self.TK )\n\t\t\tpacket \t\t= LLC()/SNAP()/EAPOL()/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tnew_packet \t= packet.__class__( plaintext )\n\t\t\t\n\t\t\t# Assert on the flags in the Key Information to verify it is GKHS Message 1/2.\n\t\t\tkeyinfoReceived \t= new_packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\t\tself.__setKeyIDFromFlaglist( self.__getFlaglist( keyinfoReceived ) )\n\t\t\tflaglist\t\t= ['HMAC_SHA1_AES','group','ack','mic','secure']\n\t\t\tflaglist.append( self.keyID ) # Copying the Key ID from the received packet.\n\t\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t\t'The received packet is not Group Key Handshake Message 1/2.'\n\t\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL Group Key Handshake Message 1/2 AES' )\n\t\t\t\n\t\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\t\tself.__assertWPAKeyMIC( new_packet , Crypto.Hash.SHA )\n\n\t\t\t# Update the Replay Counter.\n\t\t\tself.replayCounter\t= new_packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\t\t\n\t\t\t# Retrieve the Group Temporal key.\n\t\t\tself.GTK = self.handleAES.unwrapKey( new_packet.WPAKey , self.KEK ) # Resulting key of 16/32 octets.\n\t\t\tself.logger.logKey( 'Group Temporal Key' , self.GTK )\n\t\t\t\n\t\texcept:\n\t\t\traise", "def read_public_key(f: IO[str]) -> Tuple[str, str, str, str]:\n data = f.read()\n try:\n kind, key, comment = data.split(\" \")\n if kind.startswith(\"ssh-\") and comment:\n base64.b64decode(key)\n return (kind, key, comment, data)\n except ValueError:\n pass\n\n raise click.ClickException(\"{} is not a valid SSH key\".format(f.name))", "def extractParamsFromKey(key: str) -> []:\n l = base64.b64decode(key).decode('ascii')\n \n param1 = l.split('\\n')[0]\n param2 = l.split('\\n')[1]\n #convert back to int\n param1 = int(param1, 16)\n param2 = int(param2, 16)\n \n if args.verbose : print(param1,param2)\n return [param1,param2]", "def decrypt(private_key, ciphertext):\n if len(ciphertext) < 512 + 16:\n return None\n msg_header = ciphertext[:512]\n msg_iv = ciphertext[512:512+16]\n msg_body = ciphertext[512+16:]\n try:\n symmetric_key = PKCS1_OAEP.new(private_key).decrypt(msg_header)\n except ValueError:\n return None\n if len(symmetric_key) != 32:\n return None\n return AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).decrypt(msg_body)" ]
[ "0.7084714", "0.6720833", "0.6673835", "0.6610752", "0.6331672", "0.6264508", "0.6190057", "0.6077617", "0.6024118", "0.6018048", "0.5971257", "0.59107363", "0.5894486", "0.58921105", "0.58741915", "0.5793097", "0.57621163", "0.5711211", "0.5711122", "0.5708818", "0.570693", "0.56914663", "0.56707627", "0.56693476", "0.5659855", "0.5626094", "0.56121975", "0.5570402", "0.55692464", "0.55530477", "0.55443406", "0.55343336", "0.55286014", "0.5522538", "0.55019265", "0.54994977", "0.54756194", "0.54745823", "0.5451984", "0.54449874", "0.54291654", "0.5423206", "0.54142946", "0.54119664", "0.540753", "0.5397992", "0.53897184", "0.5383952", "0.5363698", "0.5360642", "0.535912", "0.5329867", "0.5323189", "0.53129786", "0.53123695", "0.52751315", "0.5274671", "0.52742106", "0.52723235", "0.5258513", "0.525757", "0.5249563", "0.5248255", "0.523642", "0.5233039", "0.520947", "0.5209455", "0.5196564", "0.5194819", "0.5180362", "0.51782024", "0.51724803", "0.51632434", "0.51607776", "0.5157774", "0.5156921", "0.51550186", "0.5154873", "0.513776", "0.51297426", "0.51189464", "0.5117774", "0.5107863", "0.50942504", "0.5092471", "0.5092111", "0.5086787", "0.50813377", "0.5073578", "0.5065991", "0.50623333", "0.5061655", "0.50507224", "0.5039641", "0.5032399", "0.5029181", "0.50266004", "0.50242835", "0.5018967", "0.5018374" ]
0.5370357
48
Parses a PKCS12 ASN.1 DER-encoded structure and extracts certs and keys
def _parse_pkcs12(data, password, load_private_key):
    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))

    if password is not None:
        if not isinstance(password, byte_cls):
            raise TypeError(pretty_message(
                '''
                password must be a byte string, not %s
                ''',
                type_name(password)
            ))
    else:
        password = b''

    certs = {}
    private_keys = {}

    pfx = Pfx.load(data)

    auth_safe = pfx['auth_safe']
    if auth_safe['content_type'].native != 'data':
        raise ValueError(pretty_message(
            '''
            Only password-protected PKCS12 files are currently supported
            '''
        ))
    authenticated_safe = pfx.authenticated_safe

    mac_data = pfx['mac_data']
    if mac_data:
        mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native
        key_length = {
            'sha1': 20,
            'sha224': 28,
            'sha256': 32,
            'sha384': 48,
            'sha512': 64,
            'sha512_224': 28,
            'sha512_256': 32,
        }[mac_algo]
        mac_key = pkcs12_kdf(
            mac_algo,
            password,
            mac_data['mac_salt'].native,
            mac_data['iterations'].native,
            key_length,
            3  # ID 3 is for generating an HMAC key
        )
        hash_mod = getattr(hashlib, mac_algo)
        computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest()
        stored_hmac = mac_data['mac']['digest'].native
        if not constant_compare(computed_hmac, stored_hmac):
            raise ValueError('Password provided is invalid')

    for content_info in authenticated_safe:
        content = content_info['content']

        if isinstance(content, OctetString):
            _parse_safe_contents(content.native, certs, private_keys, password, load_private_key)

        elif isinstance(content, EncryptedData):
            encrypted_content_info = content['encrypted_content_info']

            encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm']
            encrypted_content = encrypted_content_info['encrypted_content'].native
            decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)

            _parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key)

        else:
            raise ValueError(pretty_message(
                '''
                Public-key-based PKCS12 files are not currently supported
                '''
            ))

    key_fingerprints = set(private_keys.keys())
    cert_fingerprints = set(certs.keys())

    common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))

    key = None
    cert = None
    other_certs = []

    if len(common_fingerprints) >= 1:
        fingerprint = common_fingerprints[0]
        key = private_keys[fingerprint]
        cert = certs[fingerprint]
        other_certs = [certs[f] for f in certs if f != fingerprint]
        return (key, cert, other_certs)

    if len(private_keys) > 0:
        first_key = sorted(list(private_keys.keys()))[0]
        key = private_keys[first_key]

    if len(certs) > 0:
        first_key = sorted(list(certs.keys()))[0]
        cert = certs[first_key]
        del certs[first_key]

    if len(certs) > 0:
        other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly)

    return (key, cert, other_certs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseBinary(self, bytes):\r\n\r\n self.bytes = bytearray(bytes)\r\n p = ASN1Parser(bytes)\r\n\r\n #Get the tbsCertificate\r\n tbsCertificateP = p.getChild(0)\r\n\r\n #Is the optional version field present?\r\n #This determines which index the key is at.\r\n if tbsCertificateP.value[0]==0xA0:\r\n subjectPublicKeyInfoIndex = 6\r\n else:\r\n subjectPublicKeyInfoIndex = 5\r\n\r\n #Get the subject\r\n self.subject = tbsCertificateP.getChildBytes(\\\r\n subjectPublicKeyInfoIndex - 1)\r\n\r\n #Get the subjectPublicKeyInfo\r\n subjectPublicKeyInfoP = tbsCertificateP.getChild(\\\r\n subjectPublicKeyInfoIndex)\r\n\r\n #Get the algorithm\r\n algorithmP = subjectPublicKeyInfoP.getChild(0)\r\n rsaOID = algorithmP.value\r\n if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:\r\n raise SyntaxError(\"Unrecognized AlgorithmIdentifier\")\r\n\r\n #Get the subjectPublicKey\r\n subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1)\r\n\r\n #Adjust for BIT STRING encapsulation\r\n if (subjectPublicKeyP.value[0] !=0):\r\n raise SyntaxError()\r\n subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:])\r\n\r\n #Get the modulus and exponent\r\n modulusP = subjectPublicKeyP.getChild(0)\r\n publicExponentP = subjectPublicKeyP.getChild(1)\r\n\r\n #Decode them into numbers\r\n n = bytesToNumber(modulusP.value)\r\n e = bytesToNumber(publicExponentP.value)\r\n\r\n #Create a public key instance\r\n self.publicKey = _createPublicRSAKey(n, e)", "def asn1_loads(asn1_str):\n\n # ASN.1 grammar\n identifier = pp.Word(pp.alphas + \"_\")\n assign = pp.Literal(\"::=\")\n # typedef = identifier.setName(\"typeref\") + assign + identifier.setName(\"basetype\")\n comment1 = pp.Literal(\"#\") + pp.originalTextFor(pp.SkipTo(pp.LineEnd()))\n # typelist = pp.OneOrMore(typedef)\n meta1 = pp.LineStart() + identifier + pp.Literal(\":\") + pp.SkipTo(pp.LineEnd()).setDebug()\n meta2 = pp.LineStart() + pp.White() + pp.SkipTo(pp.LineEnd()).setDebug()\n metaval = meta1 + pp.ZeroOrMore(meta2)\n # metalist = pp.ZeroOrMore(comment1) + pp.Literal(\"/*\") + pp.OneOrMore(metaval) + pp.Literal(\"*/\")\n metalist = pp.SkipTo(pp.Literal(\"/*\")).setDebug() + pp.Literal(\"/*\") + pp.OneOrMore(\n metaval).setDebug() + pp.Literal(\"*/\")\n\n asn1 = metalist.parseString(asn1_str, parseAll=False)\n print(asn1)\n jaen = {\"meta\": {}, \"types\": []}\n return jaen", "def _extract_values_from_certificate(cert):\n logger = getLogger(__name__)\n # cert and serial number\n data = {\n u'cert': cert,\n u'issuer': cert.get_issuer().der(),\n u'serial_number': cert.get_serial_number(),\n u'algorithm': rfc2437.id_sha1,\n u'algorithm_parameter': univ.Any(hexValue='0500') # magic number\n }\n # DN Hash\n data[u'name'] = cert.get_subject()\n cert_der = data[u'name'].der()\n sha1_hash = hashlib.sha1()\n sha1_hash.update(cert_der)\n data[u'name_hash'] = sha1_hash.hexdigest()\n\n # public key Hash\n data['key_hash'] = _get_pubickey_sha1_hash(cert).hexdigest()\n\n # CRL and OCSP\n data['crl'] = None\n ocsp_uris0 = []\n for idx in range(cert.get_extension_count()):\n e = cert.get_extension(idx)\n if e.get_short_name() == b'authorityInfoAccess':\n for line in str(e).split(u\"\\n\"):\n m = OCSP_RE.match(line)\n if m:\n logger.debug(u'OCSP URL: %s', m.group(1))\n ocsp_uris0.append(m.group(1))\n elif e.get_short_name() == b'crlDistributionPoints':\n for line in str(e).split(u\"\\n\"):\n m = CRL_RE.match(line)\n if m:\n logger.debug(u\"CRL: %s\", m.group(1))\n data['crl'] = m.group(1)\n\n if len(ocsp_uris0) == 1:\n data['ocsp_uri'] = ocsp_uris0[0]\n elif 
len(ocsp_uris0) == 0:\n data['ocsp_uri'] = u''\n else:\n raise OperationalError(\n msg=u'More than one OCSP URI entries are specified in '\n u'the certificate',\n errno=ER_FAILED_TO_GET_OCSP_URI,\n )\n data[u'is_root_ca'] = cert.get_subject() == cert.get_issuer()\n return data", "def parse(self, s):\r\n\r\n bytes = dePem(s, \"CERTIFICATE\")\r\n self.parseBinary(bytes)\r\n return self", "def from_bytes(cls, bytes):\n construct = _constructs.Certificate.parse(bytes)\n return cls(\n certificate_list=[\n ASN1Cert(\n asn1_cert=asn1cert.asn1_cert\n )\n for asn1cert in construct.certificate_list],\n )", "def parse_config(self, data):\n match = re.search(\"-----BEGIN RSA PRIVATE KEY-----.*\" + \\\n \"-----END RSA PRIVATE KEY-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Private key not found\")\n key = match.group()\n\n match = re.search(\"-----BEGIN CERTIFICATE-----.*\" + \\\n \"-----END CERTIFICATE-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Certificate not found\")\n cert = match.group()\n # config also contains allowed, dns, but we don't use that for GCMU\n return (cert, key)", "def parsePemList(self, s):\r\n x509List = []\r\n bList = dePemList(s, \"CERTIFICATE\")\r\n for b in bList:\r\n x509 = X509()\r\n x509.parseBinary(b)\r\n x509List.append(x509)\r\n self.x509List = x509List", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def parse_der_certificates(der_bytes: bytes) -> List[Certificate]:\n\n result = []\n try:\n leaf = x509.load_der_x509_certificate(der_bytes, default_backend())\n result.append(leaf)\n _, remaining_data = decode(der_bytes)\n while len(remaining_data) > 0:\n cert = x509.load_der_x509_certificate(remaining_data, default_backend())\n result.append(cert)\n _, remaining_data = decode(remaining_data)\n except Exception:\n raise X509CertificateError('Unable to parse DER X.509 certificate')\n\n return result", "def dumpasn1(self):\n\n ret = None\n fn = \"dumpasn1.%d.tmp\" % os.getpid()\n try:\n f = open(fn, \"wb\")\n f.write(self.get_DER())\n f.close()\n p = subprocess.Popen((\"dumpasn1\", \"-a\", fn), stdout = subprocess.PIPE, stderr = subprocess.STDOUT)\n ret = \"\\n\".join(x for x in p.communicate()[0].splitlines() if x.startswith(\" \"))\n except Exception, e:\n ret = \"[Could not run dumpasn1: %s]\" % e\n finally:\n os.unlink(fn)\n return ret", "def get_der(self):\n return OpenSSL.crypto.dump_certificate(\n OpenSSL.crypto.FILETYPE_ASN1, self._cert)", "def info_from_args(args):\n return CertInfo(\n subject=parse_dn(args.subject),\n usage=parse_list(args.usage),\n alt_names=parse_list(args.san),\n ocsp_nocheck=args.ocsp_nocheck,\n ocsp_must_staple=args.ocsp_must_staple,\n ocsp_must_staple_v2=args.ocsp_must_staple_v2,\n ocsp_urls=parse_list(args.ocsp_urls),\n crl_urls=parse_list(args.crl_urls),\n issuer_urls=parse_list(args.issuer_urls),\n permit_subtrees=parse_list(args.permit_subtrees),\n exclude_subtrees=parse_list(args.exclude_subtrees),\n ca=args.CA,\n path_length=args.path_length)", "def verify(self, ta):\n\n try:\n cms = self.get_POW()\n except:\n if self.print_on_der_error:\n logger.debug(\"Problem parsing DER CMS message, might not really be DER: %r\",\n self.get_DER())\n raise rpki.exceptions.UnparsableCMSDER\n\n if cms.eContentType() != self.econtent_oid:\n raise rpki.exceptions.WrongEContentType(\"Got CMS eContentType %s, expected %s\" % (\n cms.eContentType(), self.econtent_oid))\n\n certs = [X509(POW = x) for x in cms.certs()]\n crls = [CRL(POW = c) for c in 
cms.crls()]\n\n if self.debug_cms_certs:\n for x in certs:\n logger.debug(\"Received CMS cert issuer %s subject %s SKI %s\",\n x.getIssuer(), x.getSubject(), x.hSKI())\n for c in crls:\n logger.debug(\"Received CMS CRL issuer %r\", c.getIssuer())\n\n now = rpki.sundial.now()\n\n trusted_ee = None\n trusted_ca = []\n untrusted_ee = None\n\n for x in X509.normalize_chain(ta):\n if self.debug_cms_certs:\n logger.debug(\"CMS trusted cert issuer %s subject %s SKI %s\",\n x.getIssuer(), x.getSubject(), x.hSKI())\n if x.getNotAfter() < now:\n raise rpki.exceptions.TrustedCMSCertHasExpired(\"Trusted CMS certificate has expired\",\n \"%s (%s)\" % (x.getSubject(), x.hSKI()))\n if x.is_CA():\n trusted_ca.append(x)\n else:\n if trusted_ee is None:\n trusted_ee = x\n else:\n raise rpki.exceptions.MultipleCMSEECert(\"Multiple CMS EE certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in ta if not x.is_CA()))\n\n if trusted_ee:\n if self.debug_cms_certs:\n logger.debug(\"Trusted CMS EE cert issuer %s subject %s SKI %s\",\n trusted_ee.getIssuer(), trusted_ee.getSubject(), trusted_ee.hSKI())\n if len(certs) > 1 or (len(certs) == 1 and\n (certs[0].getSubject() != trusted_ee.getSubject() or\n certs[0].getPublicKey() != trusted_ee.getPublicKey())):\n raise rpki.exceptions.UnexpectedCMSCerts(\"Unexpected CMS certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in certs))\n if crls:\n raise rpki.exceptions.UnexpectedCMSCRLs(\"Unexpected CRLs\", *(\"%s (%s)\" % (\n c.getIssuer(), c.hAKI()) for c in crls))\n\n else:\n untrusted_ee = [x for x in certs if not x.is_CA()]\n if len(untrusted_ee) < 1:\n raise rpki.exceptions.MissingCMSEEcert\n if len(untrusted_ee) > 1 or (not self.allow_extra_certs and len(certs) > len(untrusted_ee)):\n raise rpki.exceptions.UnexpectedCMSCerts(\"Unexpected CMS certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in certs))\n untrusted_ee = untrusted_ee[0]\n if len(crls) < 1:\n if self.require_crls:\n raise rpki.exceptions.MissingCMSCRL\n else:\n logger.warning(\"MISSING CMS CRL! Ignoring per self.require_crls setting\")\n if len(crls) > 1 and not self.allow_extra_crls:\n raise rpki.exceptions.UnexpectedCMSCRLs(\"Unexpected CRLs\", *(\"%s (%s)\" % (\n c.getIssuer(), c.hAKI()) for c in crls))\n\n for x in certs:\n if x.getNotAfter() < now:\n raise rpki.exceptions.CMSCertHasExpired(\"CMS certificate has expired\", \"%s (%s)\" % (\n x.getSubject(), x.hSKI()))\n\n for c in crls:\n if c.getNextUpdate() < now:\n logger.warning(\"Stale BPKI CMS CRL (%s %s %s)\", c.getNextUpdate(), c.getIssuer(), c.hAKI())\n\n # XXX Verify certificate chain via X.509 machinery, not CMS\n # machinery. Awful mess due to history, needs cleanup, but\n # get it working again first.\n\n cert = (trusted_ee or untrusted_ee).get_POW()\n\n cert.verify(trusted = (x.get_POW() for x in trusted_ca),\n crl = crls[0].get_POW() if untrusted_ee and crls else None)\n\n try:\n # XXX This isn't right yet, but let's test before gettting more complicated\n #\n # Aside from all the type and exception abominations, the\n # main problem here is that we're no longer verifying the\n # certificate chain, just the CMS signature. 
Certificate\n # verificaiton is a separate step under the new scheme,\n # and probably comes before this, but let's write down\n # what the problem is before it gets lost...\n\n content = cms.verify(certs = (x.get_POW() for x in X509.normalize_chain(ta)),\n flags = rpki.POW.CMS_NO_SIGNER_CERT_VERIFY)\n except:\n if self.dump_on_verify_failure:\n if self.dump_using_dumpasn1:\n dbg = self.dumpasn1()\n else:\n dbg = cms.pprint()\n logger.warning(\"CMS verification failed, dumping ASN.1 (%d octets):\", len(self.get_DER()))\n for line in dbg.splitlines():\n logger.warning(line)\n\n # XXX Old code replaced rpki.POW exception with this. For\n # debugging I'd rather see what POW has to say; decide\n # later whether to keep this change.\n #\n #raise rpki.exceptions.CMSVerificationFailed(\"CMS verification failed\")\n raise\n\n return content", "def test_pkcs12_ordering():\n\n def make_cert(name):\n key = ec.generate_private_key(ec.SECP256R1())\n subject = x509.Name(\n [\n x509.NameAttribute(x509.NameOID.COMMON_NAME, name),\n ]\n )\n now = datetime.utcnow()\n cert = (\n x509.CertificateBuilder()\n .subject_name(subject)\n .issuer_name(subject)\n .public_key(key.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(now)\n .not_valid_after(now)\n .sign(key, hashes.SHA256())\n )\n return (key, cert)\n\n # Make some certificates with distinct names.\n a_name = \"A\" * 20\n b_name = \"B\" * 20\n c_name = \"C\" * 20\n a_key, a_cert = make_cert(a_name)\n _, b_cert = make_cert(b_name)\n _, c_cert = make_cert(c_name)\n\n # Bundle them in a PKCS#12 file in order A, B, C.\n p12 = serialize_key_and_certificates(\n b\"p12\", a_key, a_cert, [b_cert, c_cert], serialization.NoEncryption()\n )\n\n # Parse them out. The API should report them in the same order.\n (key, cert, certs) = load_key_and_certificates(p12, None)\n assert cert == a_cert\n assert certs == [b_cert, c_cert]\n\n # The ordering in the PKCS#12 file itself should also match.\n a_idx = p12.index(a_name.encode(\"utf-8\"))\n b_idx = p12.index(b_name.encode(\"utf-8\"))\n c_idx = p12.index(c_name.encode(\"utf-8\"))\n\n assert a_idx < b_idx < c_idx", "def deserialize(self, data):\n assert self._cert_store is not None\n try:\n data = self._deserialize(data)\n signature = b64decode(data[\"signature\"])\n signer = data[\"signer\"]\n data = data[\"data\"]\n self._cert_store[signer].verify(data, signature, self._digest)\n return self._deserialize(data)\n except Exception, exc:\n raise SecurityError(\"Unable to deserialize: %r\" % (exc, ))", "def get_certinfo(doc):\n\n #set a two second default timeout to recieve a cert\n socket.setdefaulttimeout(2)\n doc['ssl'] = {} \n\n try:\n cert = ssl.get_server_certificate((doc['hostname'], 443))\n #sometimes certs come back as unicode so cast to str() aka ascii\n cert = M2Crypto.X509.load_cert_string(str(cert))\n\n except:\n syslog.syslog('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n print('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n #lets remove the ssl key and return the doc untouched\n doc.pop('ssl')\n return doc\n\n\n #get creation date\n doc['ssl']['created'] = cert.get_not_before().get_datetime().isoformat()\n #get not valid after, aka expiration data\n doc['ssl']['expire'] = cert.get_not_after().get_datetime().isoformat()\n #get issuer information\n doc['ssl']['issuer'] = cert.get_issuer().as_text()\n #get subject information\n doc['ssl']['subject'] = cert.get_subject().as_text()\n #get keysize, size() returns in bytes, so we multiply * 8 to get the 
number of bits\n doc['ssl']['keysize'] = cert.get_pubkey().size() * 8\n #get cert fingerprint for comparison\n doc['ssl']['fingerprint'] = cert.get_fingerprint()\n\n return doc", "def _check_certificate(public_cert_content, priv_key_content,\n domain=None, at_time=None):\n result = {}\n # Read the private key and public certificate\n try:\n priv_key = OpenSSL.crypto.load_privatekey(\n OpenSSL.crypto.FILETYPE_PEM, priv_key_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate_key': {\n 'state': 'invalid', 'detail': str(err)}})\n priv_key = None\n\n try:\n public_cert = OpenSSL.crypto.load_certificate(\n OpenSSL.crypto.FILETYPE_PEM, public_cert_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate': {\n 'state': 'invalid', 'detail': str(err)}})\n public_cert = None\n\n if priv_key and public_cert:\n context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)\n context.use_privatekey(priv_key)\n context.use_certificate(public_cert)\n try:\n context.check_privatekey()\n except OpenSSL.SSL.Error:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate does not match private key.\"}})\n\n if result:\n raise RuntimeError(result)\n\n not_after = public_cert.get_notAfter()\n if not isinstance(not_after, six.string_types):\n not_after = not_after.decode('utf-8')\n not_after = datetime.datetime.strptime(not_after, \"%Y%m%d%H%M%SZ\")\n common_name = public_cert.get_subject().commonName\n alt_names = []\n for ext_idx in range(0, public_cert.get_extension_count()):\n extension = public_cert.get_extension(ext_idx)\n if extension.get_short_name().decode('utf-8') == 'subjectAltName':\n # data of the X509 extension, encoded as ASN.1\n decoded_alt_names, _ = asn1_decoder(\n extension.get_data(), asn1Spec=SubjectAltName())\n for alt in nat_encoder(decoded_alt_names):\n alt_name = alt['dNSName'].decode('utf-8')\n if alt_name != common_name:\n alt_names += [alt_name]\n if domain:\n found = False\n for alt_name in [common_name] + alt_names:\n regex = alt_name.replace('.', r'\\.').replace('*', r'.*') + '$'\n if re.match(regex, domain) or alt_name == domain:\n found = True\n break\n if not found:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"domain name (%s) does not match common or alt names\"\\\n \" present in certificate (%s, %s).\" % (\n domain, common_name, ','.join(alt_names))}})\n if at_time:\n if not_after <= at_time:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate is only valid until %s.\" % not_after}})\n\n if result:\n raise RuntimeError(result)\n\n result.update({'ssl_certificate': {\n 'common_name': common_name,\n 'alt_names': alt_names,\n 'state': result.get('ssl_certificate', {}).get('state', 'valid'),\n 'issuer': public_cert.get_issuer().organizationName,\n 'ends_at': not_after.isoformat()}})\n return result", "def unpack_keys_from_xfer(key_pack_hex: hex,\n path=paths.nacl_keys,\n *args,\n **kwargs):\n global public_box\n\n try:\n key_dict = public_box.decrypt(key_pack_hex)\n key_dict = json.loads(key_dict)\n\n aes_key = key_dict[\"aes\"]\n AES256Cipher().write_key(aes_key.encode())\n\n fernet_key = key_dict[\"fernet\"]\n FernetCipher().write_key(fernet_key.encode())\n\n chacha_key = key_dict[\"chacha\"]\n XChaCha20Poly1305.write_key(Base64Encoder.decode(chacha_key))\n\n except:\n print(sysMsgList.keysUnpackFail)", "def Certificate(self) -> _n_8_t_0:", "def Certificate(self) -> _n_8_t_0:", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return 
self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")", "def _extract_certificate_chain(connection):\n logger = getLogger(__name__)\n cert_data = {}\n logger.debug(\n \"# of certificates: %s\",\n len(connection.get_peer_cert_chain()))\n\n for cert in connection.get_peer_cert_chain():\n logger.debug(\n u'subject: %s, issuer: %s', cert.get_subject(),\n cert.get_issuer())\n data = _extract_values_from_certificate(cert)\n logger.debug('is_root_ca: %s', data[u'is_root_ca'])\n cert_data[cert.get_subject().der()] = data\n return _create_pair_issuer_subject(cert_data)", "def test_public_key_dsa(self):\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIICDjCCAcqgAwIBAgIJAMcdoiKyV98cMAsGCWCGSAFlAwQDAjAiMRAwDgYDVQQD\nDAdEU0EgNTEyMQ4wDAYDVQQKDAVXZWJDQTAeFw0xODA1MjcxMDI1MjBaFw0xODA2\nMjYxMDI1MjBaMCIxEDAOBgNVBAMMB0RTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMIHw\nMIGoBgcqhkjOOAQBMIGcAkEAnogScrza9M5nFogjwu7MUSgOeWRfHSFWKLiFxfkN\nOAb1Z5oXTUKRcKdSxfI1zu47rvyqV6+4SSkQEsVJ2/7DQQIVANuQv4L3sp8AiUn+\nmwCyXhedQl2ZAkBfCDLU4nx7OeMx+vD9MN7FW57pHm/43B1Tu/cUOWcp5VHPJRuV\nWJqINIteY/0ilFEUCMibgol8Upj6CGnuDpvTA0MAAkAbnRx76A8r+o/3I5hlrlAm\nCi68uiiqW6W2R40U2g/KlIiafMEQ3+OrMwwkPX0aaJwa8m7lsUlmhhYOXu5p4fL1\no1AwTjAdBgNVHQ4EFgQUHub1qPkaKCtkQbmu3RnLaa8QAP4wHwYDVR0jBBgwFoAU\nHub1qPkaKCtkQbmu3RnLaa8QAP4wDAYDVR0TBAUwAwEB/zALBglghkgBZQMEAwID\nMQAwLgIVAMOEZCvJoNjIMzbH0yWrEUS6IxywAhUAzDhkGKvAH1V3o2ZsJsIotFUk\nIiQ=\n-----END CERTIFICATE-----\"\"\"\n x509 = crypto.load_certificate(PEM, cert)\n self.assertEqual(utils.public_key_type(x509), c.KEY_DSA)", "def parse_key(key: RSA.RsaKey) -> str:\n\n return binascii.hexlify(key.exportKey(\n format='DER')).decode('ascii')", "def parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)", "def parse(obj):\n data = json.loads(obj)\n cryptopars = init_crypto_args(**data)\n return cryptopars\n # return cipherdata(cryptopars, **data)", "def parse(self, xml_text):\n xml_doc = parse_doc(xml_text)\n data = findtext(xml_doc, \"Data\")\n if data is None:\n return\n\n cryptutil = CryptUtil(conf.get_openssl_cmd())\n p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME)\n p7m = (\"MIME-Version:1.0\\n\"\n \"Content-Disposition: attachment; filename=\\\"{0}\\\"\\n\"\n \"Content-Type: application/x-pkcs7-mime; name=\\\"{1}\\\"\\n\"\n \"Content-Transfer-Encoding: base64\\n\"\n \"\\n\"\n \"{2}\").format(p7m_file, p7m_file, data)\n\n self.client.save_cache(p7m_file, p7m)\n\n trans_prv_file = os.path.join(conf.get_lib_dir(),\n TRANSPORT_PRV_FILE_NAME)\n trans_cert_file = os.path.join(conf.get_lib_dir(),\n TRANSPORT_CERT_FILE_NAME)\n pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME)\n # decrypt certificates\n cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file,\n pem_file)\n\n # The parsing process use public key to match prv and crt.\n buf = []\n begin_crt = False\n begin_prv = False\n prvs = {}\n thumbprints = {}\n index = 0\n v1_cert_list = []\n with open(pem_file) as pem:\n for line in pem.readlines():\n buf.append(line)\n if re.match(r'[-]+BEGIN.*KEY[-]+', line):\n begin_prv = True\n elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line):\n begin_crt = True\n elif re.match(r'[-]+END.*KEY[-]+', line):\n tmp_file = 
self.write_to_tmp_file(index, 'prv', buf)\n pub = cryptutil.get_pubkey_from_prv(tmp_file)\n prvs[pub] = tmp_file\n buf = []\n index += 1\n begin_prv = False\n elif re.match(r'[-]+END.*CERTIFICATE[-]+', line):\n tmp_file = self.write_to_tmp_file(index, 'crt', buf)\n pub = cryptutil.get_pubkey_from_crt(tmp_file)\n thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file)\n thumbprints[pub] = thumbprint\n # Rename crt with thumbprint as the file name\n crt = \"{0}.crt\".format(thumbprint)\n v1_cert_list.append({\n \"name\": None,\n \"thumbprint\": thumbprint\n })\n os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt))\n buf = []\n index += 1\n begin_crt = False\n\n # Rename prv key with thumbprint as the file name\n for pubkey in prvs:\n thumbprint = thumbprints[pubkey]\n if thumbprint:\n tmp_file = prvs[pubkey]\n prv = \"{0}.prv\".format(thumbprint)\n os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv))\n\n for v1_cert in v1_cert_list:\n cert = Cert()\n set_properties(\"certs\", cert, v1_cert)\n self.cert_list.certificates.append(cert)", "def test__format_asn_dict(self, parser):\n for key, value in RPKI_Validator_Wrapper.get_validity_dict().items():\n d = {'asn': 'AS198051', 'prefix': '1.2.0.0/16', 'validity': key}\n assert parser._format_asn_dict(d) == [198051, '1.2.0.0/16', value]", "def _ParseCertificateArguments(client, args):\n self_managed = None\n managed = None\n certificate_type = None\n if args.certificate:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.SELF_MANAGED\n certificate = files.ReadFileContents(args.certificate)\n private_key = files.ReadFileContents(args.private_key)\n self_managed = client.messages.SslCertificateSelfManagedSslCertificate(\n certificate=certificate, privateKey=private_key)\n if args.domains:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.MANAGED\n managed = client.messages.SslCertificateManagedSslCertificate(\n domains=args.domains)\n return certificate_type, self_managed, managed", "def test_parse_direct_response(self):\n response = \"\"\"\\\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<createCustomerProfileTransactionResponse xmlns=\"AnetApi/xml/v1/schema/AnetApiSchema.xsd\">\n <messages>\n <resultCode>Ok</resultCode>\n <message>\n <code>I00001</code>\n <text>Successful.</text>\n </message>\n </messages>\n <directResponse>*1*;*1*;*1*;*This transaction has been approved.*;*000000*;*Y*;*2000000001*;*INV000001*;*description of transaction*;*10.95*;*CC*;*auth_capture*;*custId123*;*John*;*Doe*;**;*123 Main St., foo*;*Bellevue*;*WA*;*98004*;*USA*;*000-000-0000*;**;*[email protected]*;*John*;*Doe*;**;*123 Main St.*;*Bellevue*;*WA*;*98004*;*USA*;*1.00*;*0.00*;*2.00*;*FALSE*;*PONUM000001*;*D18EB6B211FE0BBF556B271FDA6F92EE*;*M*;*buaaahahah , *;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;*wallers,*</directResponse>\n</createCustomerProfileTransactionResponse>\"\"\"\n resp = x.to_dict(response, responses.cim_map, delimiter=u\";\", encapsulator=u\"*\")\n assert resp.direct_response.code == u\"1\"\n assert resp.direct_response.address == u\"123 Main St., foo\"\n assert resp.direct_response.holder_verification == u\"buaaahahah , \"", "def parse_certificate(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data)\n\n if 
key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a certificate, but\n rather a private key\n '''\n ))\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a certificate, but\n rather a public key\n '''\n ))\n\n if key_type is None or key_type == 'certificate':\n try:\n return Certificate.load(data)\n except (ValueError):\n pass # Data was not a Certificate\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known certificate format\n '''\n ))", "def der(self) -> bytes:\n return self.asn1.dump()", "def rsa_pkcs1v15_decrypt(self, data):\n pass", "def decode(self, crypto):", "def key2tokenAndDSN(self, key):\n\t import binascii\n\t import struct\n\t import hashlib\n\n\t self.keystr = struct.pack(\"!Q\", key)\n\t self.h = hashlib.sha1(self.keystr.rjust(8,'\\00'))\n\t self.shastr=self.h.digest() # binary\n\t #shastr = struct.pack(\"!IIIII\", *struct.unpack(\"@IIIII\",shastr)) #to net\n\t self.token, self.dsn = self.shastr[0:4], self.shastr[-8:]\n\t #print \"raw: %s (len=%i)\"%(shastr,len(shastr)) \n\t #print \"hex: %s\"% binascii.hexlify(token), \"%s\"%binascii.hexlify(dsn)\n\t self.d1, self.d2 = struct.unpack(\"!II\",self.dsn)\n\t self.token, self.dsn = (struct.unpack(\"!I\",self.token)[0], (long(self.d2)<<32)+self.d1)\n\t #print \"token: %x\"% token\n\t #print \"dsn: %x\" % dsn\n\t return (self.token, self.dsn)", "def read_from(cls, s, taproot: bool = False):\n first = s.read(1)\n origin = None\n if first == b\"[\":\n prefix, char = read_until(s, b\"]\")\n if char != b\"]\":\n raise ArgumentError(\"Invalid key - missing ]\")\n origin = KeyOrigin.from_string(prefix.decode())\n else:\n s.seek(-1, 1)\n k, char = read_until(s, b\",)/\")\n der = b\"\"\n # there is a following derivation\n if char == b\"/\":\n der, char = read_until(s, b\"<{,)\")\n # legacy branches: {a,b,c...}\n if char == b\"{\":\n der += b\"{\"\n branch, char = read_until(s, b\"}\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing }\")\n der += branch + b\"}\"\n rest, char = read_until(s, b\",)\")\n der += rest\n # multipart descriptor: <a;b;c;...>\n elif char == b\"<\":\n der += b\"<\"\n branch, char = read_until(s, b\">\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing >\")\n der += branch + b\">\"\n rest, char = read_until(s, b\",)\")\n der += rest\n if char is not None:\n s.seek(-1, 1)\n # parse key\n k, xonly_repr = cls.parse_key(k, taproot)\n # parse derivation\n allow_hardened = isinstance(k, bip32.HDKey) and isinstance(k.key, ec.PrivateKey)\n derivation = AllowedDerivation.from_string(\n der.decode(), allow_hardened=allow_hardened\n )\n return cls(k, origin, derivation, taproot, xonly_repr)", "def asn1_dumps(jaen):\n asn1 = \"/*\\n\"\n hdrs = jaen[\"meta\"]\n hdr_list = [\"module\", \"title\", \"version\", \"description\", \"namespace\", \"root\", \"import\"]\n for h in hdr_list + list(set(hdrs) - set(hdr_list)):\n if h in hdrs:\n if h == \"description\":\n asn1 += fill(hdrs[h], width=80, initial_indent=\"{0:14} \".format(h + \":\"), subsequent_indent=15*\" \") + \"\\n\"\n elif h == \"import\":\n hh = \"{:14} \".format(h + \":\")\n for k, v in hdrs[h].items():\n asn1 += hh + k + \": \" + v + \"\\n\"\n hh = 15*\" \"\n else:\n asn1 += \"{0:14} {1:}\\n\".format(h + \":\", hdrs[h])\n asn1 += \"*/\\n\"\n\n asn1 += \"\\n\" + typeref(jaen[\"meta\"][\"module\"]) + \" DEFINITIONS ::=\\nBEGIN\\n\"\n\n for t in 
jaen[\"types\"]:\n tname, ttype = t[0:2]\n topts = parse_type_opts(t[2])\n tos = '(PATTERN \"' + topts[\"pattern\"] + '\")' if \"pattern\" in topts else \"\"\n asn1 += \"\\n\" + typeref(tname) + \" ::= \" + _asn1type(ttype) + tos\n if len(t) == 4:\n titems = deepcopy(t[3])\n for i in titems:\n i[1] = identifier(i[1])\n if len(i) > 2:\n i[2] = _asn1type(i[2])\n asn1 += \" {\\n\"\n flen = min(32, max(12, max([len(i[1]) for i in titems]) + 1 if titems else 0))\n if ttype.lower() == \"enumerated\":\n fmt = \" {1:\" + str(flen) + \"} ({0:d})\"\n asn1 += \",\\n\".join([fmt.format(*i) for i in titems])\n else:\n fmt = \" {1:\" + str(flen) + \"} [{0:d}] {2}{3}\"\n if ttype.lower() == 'record':\n fmt = \" {1:\" + str(flen) + \"} {2}{3}\"\n items = []\n for i in titems:\n ostr = \"\"\n opts = parse_field_opts(i[3])\n if \"atfield\" in opts:\n ostr += \".&\" + opts[\"atfield\"]\n del opts[\"atfield\"]\n if opts[\"optional\"]:\n ostr += \" OPTIONAL\"\n del opts[\"optional\"]\n items += [fmt.format(i[0], i[1], i[2], ostr) + (\" ***\" + str(opts) if opts else \"\")]\n asn1 += \",\\n\".join(items)\n asn1 += \"\\n}\\n\" if titems else \"}\\n\"\n else:\n asn1 += \"\\n\"\n asn1 += \"\\nEND\\n\"\n return asn1", "def load_x509_cert(url, httpc, spec2key, **get_args):\n try:\n r = httpc(\"GET\", url, allow_redirects=True, **get_args)\n if r.status_code == 200:\n cert = str(r.text)\n try:\n public_key = spec2key[cert] # If I've already seen it\n except KeyError:\n public_key = import_public_key_from_pem_data(cert)\n spec2key[cert] = public_key\n\n if isinstance(public_key, rsa.RSAPublicKey):\n return {\"rsa\": public_key}\n elif isinstance(public_key, ec.EllipticCurvePublicKey):\n return {\"ec\": public_key}\n else:\n raise Exception(\"HTTP Get error: %s\" % r.status_code)\n except Exception as err: # not a RSA key\n logger.warning(\"Can't load key: %s\" % err)\n return []", "def rsa_private_key_pkcs1_to_pkcs8(pkcs1_key):\n algorithm = RsaAlgorithmIdentifier()\n algorithm[\"rsaEncryption\"] = RSA_ENCRYPTION_ASN1_OID\n\n pkcs8_key = PKCS8PrivateKey()\n pkcs8_key[\"version\"] = 0\n pkcs8_key[\"privateKeyAlgorithm\"] = algorithm\n pkcs8_key[\"privateKey\"] = pkcs1_key\n\n return encoder.encode(pkcs8_key)", "def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data, password)\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a public key\n '''\n ))\n\n if key_type == 'certificate':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a certificate\n '''\n ))\n\n try:\n pki = PrivateKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PrivateKeyInfo\n\n try:\n parsed_wrapper = EncryptedPrivateKeyInfo.load(data)\n encryption_algorithm_info = parsed_wrapper['encryption_algorithm']\n encrypted_data = parsed_wrapper['encrypted_data'].native\n decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, 
password)\n pki = PrivateKeyInfo.load(decrypted_data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not EncryptedPrivateKeyInfo\n\n try:\n parsed = RSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPrivateKey\n\n try:\n parsed = DSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'dsa')\n except (ValueError):\n pass # Data was not a DSAPrivateKey\n\n try:\n parsed = ECPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'ec')\n except (ValueError):\n pass # Data was not an ECPrivateKey\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known private key format\n '''\n ))", "def _parse_safe_contents(safe_contents, certs, private_keys, password, load_private_key):\n\n if isinstance(safe_contents, byte_cls):\n safe_contents = SafeContents.load(safe_contents)\n\n for safe_bag in safe_contents:\n bag_value = safe_bag['bag_value']\n\n if isinstance(bag_value, CertBag):\n if bag_value['cert_id'].native == 'x509':\n cert = bag_value['cert_value'].parsed\n public_key_info = cert['tbs_certificate']['subject_public_key_info']\n certs[_fingerprint(public_key_info, None)] = bag_value['cert_value'].parsed\n\n elif isinstance(bag_value, PrivateKeyInfo):\n private_keys[_fingerprint(bag_value, load_private_key)] = bag_value\n\n elif isinstance(bag_value, EncryptedPrivateKeyInfo):\n encryption_algorithm_info = bag_value['encryption_algorithm']\n encrypted_key_bytes = bag_value['encrypted_data'].native\n decrypted_key_bytes = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_key_bytes, password)\n private_key = PrivateKeyInfo.load(decrypted_key_bytes)\n private_keys[_fingerprint(private_key, load_private_key)] = private_key\n\n elif isinstance(bag_value, SafeContents):\n _parse_safe_contents(bag_value, certs, private_keys, password, load_private_key)\n\n else:\n # We don't care about CRL bags or secret bags\n pass", "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def extract_certs_from_pem(pem_contents):\n start = 0\n certs = []\n while True:\n index = pem_contents.find(constants.BEGIN_CERTIFICATE_MARKER, start)\n if index == -1:\n break\n try:\n cert = x509.load_pem_x509_certificate(pem_contents[index::],\n default_backend())\n except Exception:\n LOG.exception(_(\"Load pem x509 certificate failed at file \"\n \"location: %s\") % index)\n raise exception.SysinvException(_(\n \"Failed to load pem x509 certificate\"))\n\n certs.append(cert)\n start = index + len(constants.BEGIN_CERTIFICATE_MARKER)\n return certs", "def fetch_x509_context(self) -> X509Context:", "def rsa_public_key_pkcs1_to_pkcs8(pkcs1_key):\n algorithm = 
RsaAlgorithmIdentifier()\n algorithm[\"rsaEncryption\"] = RSA_ENCRYPTION_ASN1_OID\n\n pkcs8_key = PublicKeyInfo()\n pkcs8_key[\"algorithm\"] = algorithm\n pkcs8_key[\"publicKey\"] = univ.BitString.fromOctetString(pkcs1_key)\n\n return encoder.encode(pkcs8_key)", "def pem_armor_certificate(certificate):\n\n return asymmetric.dump_certificate(certificate)", "def from_binary(self, d):\n p = MsgEcdsaCertificate._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))", "def opensslCmsSignedDataCreate( conveyedInfoFile, cert, privateKey ):\n opensslCmdArgs = [ \"openssl\", \"cms\", \"-sign\", \"-in\", conveyedInfoFile,\n \"-signer\", cert,\n \"-inkey\", privateKey,\n \"-outform\", \"der\", \"-nodetach\" ]\n conveyedInfoCmsSignedDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return conveyedInfoCmsSignedDerBase64", "def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)", "def test_rsa_ca(self):\n key = c.KEY_RSA\n usage = [\n c.KU_DIGITALSIGNATURE,\n c.KU_NONREPUDIATION,\n c.KU_KEYENCIPHERMENT,\n c.KU_DATAENCIPHERMENT,\n c.KU_KEYCERTSIGN,\n c.KU_CRLSIGN,\n ]\n self.assertTrue(utils.check_key_usage(key, usage, True))", "def test_private_key_dsa(self):\n priv = \"\"\"-----BEGIN DSA PRIVATE KEY-----\nMIH4AgEAAkEAnogScrza9M5nFogjwu7MUSgOeWRfHSFWKLiFxfkNOAb1Z5oXTUKR\ncKdSxfI1zu47rvyqV6+4SSkQEsVJ2/7DQQIVANuQv4L3sp8AiUn+mwCyXhedQl2Z\nAkBfCDLU4nx7OeMx+vD9MN7FW57pHm/43B1Tu/cUOWcp5VHPJRuVWJqINIteY/0i\nlFEUCMibgol8Upj6CGnuDpvTAkAbnRx76A8r+o/3I5hlrlAmCi68uiiqW6W2R40U\n2g/KlIiafMEQ3+OrMwwkPX0aaJwa8m7lsUlmhhYOXu5p4fL1AhUAuxjeo0++fjI+\nnEIPmnCNPGjuBY8=\n-----END DSA PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIICDjCCAcqgAwIBAgIJAMcdoiKyV98cMAsGCWCGSAFlAwQDAjAiMRAwDgYDVQQD\nDAdEU0EgNTEyMQ4wDAYDVQQKDAVXZWJDQTAeFw0xODA1MjcxMDI1MjBaFw0xODA2\nMjYxMDI1MjBaMCIxEDAOBgNVBAMMB0RTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMIHw\nMIGoBgcqhkjOOAQBMIGcAkEAnogScrza9M5nFogjwu7MUSgOeWRfHSFWKLiFxfkN\nOAb1Z5oXTUKRcKdSxfI1zu47rvyqV6+4SSkQEsVJ2/7DQQIVANuQv4L3sp8AiUn+\nmwCyXhedQl2ZAkBfCDLU4nx7OeMx+vD9MN7FW57pHm/43B1Tu/cUOWcp5VHPJRuV\nWJqINIteY/0ilFEUCMibgol8Upj6CGnuDpvTA0MAAkAbnRx76A8r+o/3I5hlrlAm\nCi68uiiqW6W2R40U2g/KlIiafMEQ3+OrMwwkPX0aaJwa8m7lsUlmhhYOXu5p4fL1\no1AwTjAdBgNVHQ4EFgQUHub1qPkaKCtkQbmu3RnLaa8QAP4wHwYDVR0jBBgwFoAU\nHub1qPkaKCtkQbmu3RnLaa8QAP4wDAYDVR0TBAUwAwEB/zALBglghkgBZQMEAwID\nMQAwLgIVAMOEZCvJoNjIMzbH0yWrEUS6IxywAhUAzDhkGKvAH1V3o2ZsJsIotFUk\nIiQ=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_DSA)", "def load_cert_der_string(string):\n bio = BIO.MemoryBuffer(string)\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)", "def main():\n ssl_date_fmt = r'%b %d %H:%M:%S %Y %Z'\n #cert_file_name = os.path.join(os.path.dirname(__file__), \"testcert.pem\")\n\n parser = argparse.ArgumentParser(description='Parse a certificate and show days left')\n parser.add_argument('-v', '--verbose', action='store_true', help='show full certificate')\n parser.add_argument('cert', nargs='+', help='certifcate file(s)')\n args = parser.parse_args()\n for cert_file_name in args.cert:\n try:\n cert_dict = ssl._ssl._test_decode_cert(cert_file_name)\n serial = cert_dict['serialNumber']\n 
subject = dict(x[0] for x in cert_dict['subject'])\n issued_to = subject['commonName']\n time_left = datetime.datetime.strptime(cert_dict['notAfter'], ssl_date_fmt) - datetime.datetime.utcnow()\n if args.verbose:\n pp(cert_dict)\n ssl_expires_in(issued_to, serial, time_left)\n\n except Exception as error:\n print(\"Error decoding certificate: {:}\".format(error))", "def parse_dict(self, data):\n self.public_key = VerifyingKey(data['public_key'])\n self.signature = base58.b58decode(data['signature']) if data['signature'] else None", "def fetch_x509_svid(self) -> X509Svid:", "def _unarmor(pem_bytes):\n\n if not isinstance(pem_bytes, byte_cls):\n raise TypeError(unwrap(\n '''\n pem_bytes must be a byte string, not %s\n ''',\n _type_name(pem_bytes)\n ))\n\n # Valid states include: \"trash\", \"headers\", \"body\"\n state = 'trash'\n headers = {}\n base64_data = b''\n object_type = None\n\n found_start = False\n found_end = False\n\n for line in pem_bytes.splitlines(False):\n if line == b'':\n continue\n\n if state == \"trash\":\n # Look for a starting line since some CA cert bundle show the cert\n # into in a parsed format above each PEM block\n type_name_match = re.match(b'^(?:---- |-----)BEGIN ([A-Z0-9 ]+)(?: ----|-----)', line)\n if not type_name_match:\n continue\n object_type = type_name_match.group(1).decode('ascii')\n\n found_start = True\n state = 'headers'\n continue\n\n if state == 'headers':\n if line.find(b':') == -1:\n state = 'body'\n else:\n decoded_line = line.decode('ascii')\n name, value = decoded_line.split(':', 1)\n headers[name] = value.strip()\n continue\n\n if state == 'body':\n if line[0:5] in (b'-----', b'---- '):\n der_bytes = base64.b64decode(base64_data)\n\n yield (object_type, headers, der_bytes)\n\n state = 'trash'\n headers = {}\n base64_data = b''\n object_type = None\n found_end = True\n continue\n\n base64_data += line\n\n if not found_start or not found_end:\n raise ValueError(unwrap(\n '''\n pem_bytes does not appear to contain PEM-encoded data - no\n BEGIN/END combination found\n '''\n ))", "def parse_aprs (packet):\n\n print (packet)\n if len(packet) == 0:\n return\n\n chan = ''\n # Split into address and information parts.\n # There could be a leading '[n]' with a channel number.\n m = re.search (r'^(\\[.+\\] *)?([^:]+):(.+)$', packet)\n if m:\n chan = m.group(1)\t# Still enclosed in [].\n addrs = m.group(2)\n info = m.group(3)\n #print ('<>'+addrs+'<>'+info+'<>')\n\n if info[0] == '}':\n # Unwrap third party traffic format\n # Preserve any channel.\n if chan:\n parse_aprs (chan + info[1:])\n else:\n parse_aprs (info[1:])\n elif info[0:3] == '{DE':\n # APRS \"user defined data\" format for EAS.\n #print ('Process \"message\" - ' + info)\n process_eas (chan, info[3:])\n else:\n print ('Not APRS \"user defined data\" format - ' + info)\n else:\n print ('Could not split into address & info parts - ' + packet)", "def parse_authenticator_data(val: bytes) -> AuthenticatorData:\n # Don't bother parsing if there aren't enough bytes for at least:\n # - rpIdHash (32 bytes)\n # - flags (1 byte)\n # - signCount (4 bytes)\n if len(val) < 37:\n raise InvalidAuthenticatorDataStructure(\n f\"Authenticator data was {len(val)} bytes, expected at least 37 bytes\"\n )\n\n pointer = 0\n\n rp_id_hash = val[pointer:32]\n pointer += 32\n\n # Cast byte to ordinal so we can use bitwise operators on it\n flags_bytes = ord(val[pointer : pointer + 1])\n pointer += 1\n\n sign_count = val[pointer : pointer + 4]\n pointer += 4\n\n # Parse flags\n flags = AuthenticatorDataFlags(\n 
up=flags_bytes & (1 << 0) != 0,\n uv=flags_bytes & (1 << 2) != 0,\n be=flags_bytes & (1 << 3) != 0,\n bs=flags_bytes & (1 << 4) != 0,\n at=flags_bytes & (1 << 6) != 0,\n ed=flags_bytes & (1 << 7) != 0,\n )\n\n # The value to return\n authenticator_data = AuthenticatorData(\n rp_id_hash=rp_id_hash,\n flags=flags,\n sign_count=int.from_bytes(sign_count, \"big\"),\n )\n\n # Parse AttestedCredentialData if present\n if flags.at is True:\n aaguid = val[pointer : pointer + 16]\n pointer += 16\n\n credential_id_len = int.from_bytes(val[pointer : pointer + 2], \"big\")\n pointer += 2\n\n credential_id = val[pointer : pointer + credential_id_len]\n pointer += credential_id_len\n\n \"\"\"\n Some authenticators incorrectly compose authData when using EdDSA for their public keys.\n A CBOR \"Map of 3 items\" (0xA3) should be \"Map of 4 items\" (0xA4), and if we manually adjust\n the single byte there's a good chance the authData can be correctly parsed. Let's try to\n detect when this happens and gracefully handle it.\n \"\"\"\n # Decodes to `{1: \"OKP\", 3: -8, -1: \"Ed25519\"}` (it's missing key -2 a.k.a. COSEKey.X)\n bad_eddsa_cbor = bytearray.fromhex(\"a301634f4b500327206745643235353139\")\n # If we find the bytes here then let's fix the bad data\n if val[pointer : pointer + len(bad_eddsa_cbor)] == bad_eddsa_cbor:\n # Make a mutable copy of the bytes...\n _val = bytearray(val)\n # ...Fix the bad byte...\n _val[pointer] = 0xA4\n # ...Then replace `val` with the fixed bytes\n val = bytes(_val)\n\n # Load the next CBOR-encoded value\n credential_public_key = cbor2.loads(val[pointer:])\n credential_public_key_bytes = cbor2.dumps(credential_public_key)\n pointer += len(credential_public_key_bytes)\n\n attested_cred_data = AttestedCredentialData(\n aaguid=aaguid,\n credential_id=credential_id,\n credential_public_key=credential_public_key_bytes,\n )\n authenticator_data.attested_credential_data = attested_cred_data\n\n if flags.ed is True:\n extension_object = cbor2.loads(val[pointer:])\n extension_bytes = cbor2.dumps(extension_object)\n pointer += len(extension_bytes)\n authenticator_data.extensions = extension_bytes\n\n # We should have parsed all authenticator data by this point\n if (len(val) > pointer):\n raise InvalidAuthenticatorDataStructure(\n \"Leftover bytes detected while parsing authenticator data\"\n )\n\n return authenticator_data", "def pfx2pem_memmory(input_file):\n pfx = open(input_file, 'rb').read()\n p12 = crypto.load_pkcs12(pfx)\n pem = crypto.dump_certificate(crypto.FILETYPE_PEM, p12.get_certificate())\n pem += crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())\n return pem", "def ipv4_asn(self):\n with open(self.netixlan) as f:\n netixlan = json.load(f)\n for item in netixlan['data']:\n if 'ipaddr4' in item and item['ipaddr4'] != None:\n yield (item['ipaddr4'], item['asn'])", "def check_dnssec(text):\n\n try:\n from dns.exception import DNSException\n import dns.dnssec\n import dns.rrset\n import Crypto.PublicKey.RSA\n #import ecdsa.ecdsa\n except ImportError:\n sys.exit(\"Problem importing DNSPython or supporting crypto packages, are they installed?\")\n\n wired_ttl = \"3600\"\n wired_rdclass = \"IN\"\n\n rrs = {}\n\n for line in text.splitlines():\n\n try:\n name, ttl, rdclass, rdtype, rdata = line.split(None, 4)\n except ValueError:\n continue\n\n if ttl != wired_ttl or rdclass != wired_rdclass:\n continue\n\n try:\n rrs[name, rdtype].append(rdata)\n except KeyError:\n rrs[name, rdtype] = [rdata]\n\n # Done parsing. 
We expect to have seen an A RRset, an RRSIG of that\n # A RRset, and the DNSKEY that we'll need to verify the RRSIG.\n\n if len(rrs) != 3:\n sys.exit(\"Expected two RRsets and an RRSIG, got %r\" % rrs)\n\n rrs = dict((rdtype, dns.rrset.from_text_list(name, int(wired_ttl), wired_rdclass, rdtype, rrs[name, rdtype]))\n for name, rdtype in rrs)\n\n try:\n dns.dnssec.validate(rrs[\"A\"], rrs[\"RRSIG\"], { rrs[\"DNSKEY\"].name : rrs[\"DNSKEY\"] })\n except DNSException, e:\n sys.exit(\"DNSSEC verification failed: %s\" % e)\n\n sys.stdout.write(\"\\nDNSSEC verification successful!\\n\\n\")", "def test_private_key_rsa(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_RSA)", "def info(self, fp):\n keys = (\n (\"cas.meta.compression\", CAS._convert_meta),\n (\"cas.meta.lib\", CAS._convert_meta),\n (\"cas.meta.fp_algo\", CAS._convert_meta),\n (\"cas.meta.orig_size\", CAS._convert_meta),\n (\"cas.refcount\", CAS._convert_refcount),\n )\n\n return {key: conv(self.ioctx.get_xattr(fp, key))\n for key, conv in keys}", "def read_chain_pair(private_key, certificates):\n with open(private_key, 'rb') as f:\n private_key = f.read()\n with open(certificates, 'rb') as f:\n certificates = f.read()\n return (private_key, certificates)", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def do_test_dig_sig(self, hashtype):\n\n if hashtype == HashTypes.SHA1:\n sha = hashes.SHA1\n elif hashtype == HashTypes.SHA2:\n sha = hashes.SHA256\n sk_priv = rsa.generate_private_key(\n public_exponent=65537,\n key_size=1024, # cheap key for testing\n backend=default_backend())\n sk_ = sk_priv.public_key()\n\n print(\"WARNING: cannot use hashlib's sha code with pyca cryptography\")\n print(\"WARNING: pyca cryptography does not support sha3/keccak\")\n\n signer = sk_priv.signer(\n padding.PSS(\n mgf=padding.MGF1(sha()),\n salt_length=padding.PSS.MAX_LENGTH),\n sha())\n\n count = 64 + self.rng.next_int16(192) # [64..256)\n data = bytes(self.rng.some_bytes(count))\n\n signer.update(data)\n signature = signer.finalize() # a binary value; bytes\n\n # BEGIN interlude: conversion to/from base64, w/ 76-byte lines\n b64sig = base64.encodebytes(signature).decode('utf-8')\n sig2 = base64.decodebytes(b64sig.encode('utf-8'))\n self.assertEqual(sig2, signature)\n # END interlude 
---------------------------------------------\n\n verifier = sk_.verifier(\n signature,\n padding.PSS(\n mgf=padding.MGF1(sha()),\n salt_length=padding.PSS.MAX_LENGTH),\n sha())\n verifier.update(data)\n\n try:\n verifier.verify()\n # digital signature verification succeeded\n except InvalidSignature:\n self.fail(\"dig sig verification unexpectedly failed\")\n\n # twiddle a random byte in data array to make verification fail\n data2 = bytearray(data)\n which = self.rng.next_int16(count)\n data2[which] = 0xff & ~data2[which]\n data3 = bytes(data2)\n\n verifier = sk_.verifier(\n signature, # same digital signature\n padding.PSS(\n mgf=padding.MGF1(sha()),\n salt_length=padding.PSS.MAX_LENGTH),\n sha())\n verifier.update(data3)\n\n try:\n verifier.verify()\n self.fail(\"expected verification of modified message to fail\")\n\n except InvalidSignature:\n pass # digital signature verification failed", "def _get_key_pair_from_sk(sk: ecdsa.SigningKey) -> typing.Tuple[bytes, bytes]:\n return sk.to_string(), \\\n sk.verifying_key.to_string(\"compressed\")", "def main(argv):\n\n\n parser = argparse.ArgumentParser(description='convert der to raw')\n parser.add_argument('-s','--secretkey_file', help='Secret key', required=True)\n parser.add_argument('-p','--publickey_file', help='Public key', required=True)\n args = parser.parse_args()\n\n secretkey_file = args.secretkey_file\n publickey_file = args.publickey_file\n\n\n privkey = SigningKey.from_der(open(secretkey_file).read())\n pubkey = VerifyingKey.from_der(open(publickey_file).read())\n\n open(secretkey_file[0:-4] + \".bin\", \"wb\").write(privkey.to_string())\n open(publickey_file[0:-4] + \".bin\", \"wb\").write(pubkey.to_string())", "def get_rsa_asymn_keys(public_exponent = 65537, key_size = 2048, bc = backend):\n\tprivate_key = asymmetric.rsa.generate_private_key(public_exponent = public_exponent, key_size = key_size, backend = bc)\n\treturn private_key,private_key.public_key()", "def cert_info(user, course):\r\n if not course.may_certify():\r\n return {}\r\n\r\n return _cert_info(user, course, certificate_status_for_student(user, course.id))", "def get_cert_content(certificate):\n cert_object = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)\n cert_content = crypto.dump_certificate(crypto.FILETYPE_TEXT, cert_object)\n return cert_content", "def read_snac(snac):\n if snac.endswith(\".snac\"):\n path = snac\n else:\n path = snac + \".snac\"\n d = {}\n with open(path) as f:\n last = None\n for l in f:\n if l.startswith(\"#\"): continue\n if \":\" in l:\n key, val = l.split(\":\")\n last = key.strip()\n val = val.strip()\n else:\n val = l.strip()\n if last:\n t = d.setdefault(last, [])\n t.append(val)\n else:\n print(\"Unexpected input: \", last)\n for a in d:\n d[a] = \" \".join(d[a])\n return d", "def rsa_file_to_privatekey(filename):\r\n fileobject = file(filename,'r')\r\n privatekeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_privatekey(privatekeystring)", "def Read(key):\n dsa = json.loads(key)\n pub = DsaPublicKey.Read(json.dumps(dsa['publicKey']))\n params = {'x': util.Base64WSDecode(dsa['x'])}\n key = DSA.construct((util.BytesToLong(pub._params['y']),\n util.BytesToLong(pub._params['g']),\n util.BytesToLong(pub._params['p']),\n util.BytesToLong(pub._params['q']),\n util.BytesToLong(params['x'])))\n return DsaPrivateKey(params, pub, key, dsa['size'])", "def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return 
key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].parsed\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': Integer(pow(\n params['g'].native,\n parsed.native,\n params['p'].native\n )),\n 'private_key': parsed,\n })\n\n if key_alg == 'ec':\n parsed = key_info['private_key'].parsed\n parsed['parameters'] = key_info['private_key_algorithm']['parameters']\n return parsed\n\n raise ValueError('Unsupported key_info.algorithm \"%s\"' % key_info.algorithm)", "def extractParamsFromKey(key: str) -> []:\n l = base64.b64decode(key).decode('ascii')\n \n param1 = l.split('\\n')[0]\n param2 = l.split('\\n')[1]\n #convert back to int\n param1 = int(param1, 16)\n param2 = int(param2, 16)\n \n if args.verbose : print(param1,param2)\n return [param1,param2]", "def _parse(self, content):\n result = TincConfParser.conf_file.parseString(to_unicode(content))\n for entry in result.get(\"entries\", []):\n self[entry[0]] = entry[1]\n keys = result.get(\"keys\", [])\n if keys:\n if len(keys) > 1:\n raise ParserError(\"Hostfile specifies more than one public key!\")\n self.rsa_public_key = '\\n'.join(keys[0])\n old_keys = result.get(\"old_keys\", [])\n for old_key in old_keys:\n self.old_public_keys.append('\\n'.join(old_key))", "def opensslCmsDataCreate( conveyedInfoFile ):\n opensslCmdArgs = [ \"openssl\", \"cms\", \"-data_create\", \"-in\", conveyedInfoFile,\n \"-outform\", \"der\" ]\n conveyedInfoCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return conveyedInfoCmsDerBase64", "def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def getSigInfo(hdr):\n \n locale.setlocale(locale.LC_ALL, 'C')\n string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'\n siginfo = hdr.sprintf(string)\n if siginfo != '(none)':\n error = 0 \n sigtype, sigdate, sigid = siginfo.split(',')\n else:\n error = 101\n sigtype = 'MD5'\n sigdate = 'None'\n sigid = 'None'\n \n infotuple = (sigtype, sigdate, sigid)\n return error, infotuple", "def load_from_existing(self, obj):\n self.subject = self.extract_name(obj.subject)\n\n for ext in obj.extensions:\n crit = ext.critical\n extobj = ext.value\n if ext.oid == ExtensionOID.BASIC_CONSTRAINTS:\n if not crit:\n raise InvalidCertificate(\"BASIC_CONSTRAINTS must be critical\")\n self.ca = extobj.ca\n self.path_length = None\n if self.ca:\n self.path_length = extobj.path_length\n elif ext.oid == ExtensionOID.KEY_USAGE:\n if not crit:\n raise InvalidCertificate(\"KEY_USAGE must be critical\")\n self.usage += self.extract_key_usage(extobj)\n elif ext.oid == ExtensionOID.SUBJECT_ALTERNATIVE_NAME:\n self.san = self.extract_gnames(extobj)\n elif ext.oid == ExtensionOID.EXTENDED_KEY_USAGE:\n self.usage += self.extract_xkey_usage(extobj)\n elif ext.oid == ExtensionOID.AUTHORITY_INFORMATION_ACCESS:\n for ad in extobj:\n if not isinstance(ad.access_location, x509.UniformResourceIdentifier):\n InvalidCertificate(\"Unsupported access_location: %s\" % (ad.access_location,))\n url = as_unicode(ad.access_location.value)\n\n if ad.access_method == AuthorityInformationAccessOID.CA_ISSUERS:\n self.issuer_urls.append(url)\n elif ad.access_method == AuthorityInformationAccessOID.OCSP:\n self.ocsp_urls.append(url)\n else:\n raise InvalidCertificate(\"Unsupported access_method: %s\" % (ad.access_method,))\n elif ext.oid == 
ExtensionOID.CRL_DISTRIBUTION_POINTS:\n for dp in extobj:\n if dp.relative_name:\n raise InvalidCertificate(\"DistributionPoint.relative_name not supported\")\n if dp.crl_issuer:\n raise InvalidCertificate(\"DistributionPoint.crl_issuer not supported\")\n if dp.reasons:\n raise InvalidCertificate(\"DistributionPoint.reasons not supported\")\n\n for gn in self.extract_gnames(dp.full_name):\n if gn.startswith('uri:'):\n self.crl_urls.append(gn[4:])\n else:\n raise InvalidCertificate(\"Unsupported DistributionPoint: %s\" % (gn,))\n elif ext.oid == ExtensionOID.NAME_CONSTRAINTS:\n self.permit_subtrees = self.extract_gnames(extobj.permitted_subtrees)\n self.exclude_subtrees = self.extract_gnames(extobj.excluded_subtrees)\n elif ext.oid == ExtensionOID.SUBJECT_KEY_IDENTIFIER:\n pass\n elif ext.oid == ExtensionOID.AUTHORITY_KEY_IDENTIFIER:\n pass\n elif ext.oid == ExtensionOID.OCSP_NO_CHECK:\n self.ocsp_nocheck = True\n elif ext.oid == ExtensionOID.TLS_FEATURE:\n for tls_feature_code in extobj:\n if tls_feature_code == x509.TLSFeatureType.status_request:\n self.ocsp_must_staple = True\n elif tls_feature_code == x509.TLSFeatureType.status_request_v2:\n self.ocsp_must_staple_v2 = True\n else:\n raise InvalidCertificate(\"Unsupported TLSFeature: %r\" % (tls_feature_code,))\n else:\n raise InvalidCertificate(\"Unsupported extension in CSR: %s\" % (ext,))", "def parse_kiss(self):\n frame_len = len(self.frame)\n\n if frame_len < 16:\n self._logger.debug('Frame len(%s) < 16, Exiting.', frame_len)\n return\n\n for raw_slice in range(0, frame_len):\n\n # Is address field length correct?\n # Find the first ODD Byte followed by the next boundary:\n if (ord(self.frame[raw_slice]) & 0x01\n and ((raw_slice + 1) % 7) == 0):\n\n i = (raw_slice + 1) / 7\n\n # Less than 2 callsigns?\n if 1 < i < 11:\n # For frames <= 70 bytes\n if frame_len >= raw_slice + 2:\n if (ord(self.frame[raw_slice + 1]) & 0x03 == 0x03 and\n ord(self.frame[raw_slice + 2]) in\n [0xf0, 0xcf]):\n self._extract_kiss_text(raw_slice)\n self._extract_kiss_destination()\n self._extract_kiss_source()\n self._extract_kiss_path(i)", "def _parse_certificate(cls, response):\n links = _parse_header_links(response)\n try:\n cert_chain_uri = links[u'up'][u'url']\n except KeyError:\n cert_chain_uri = None\n return (\n response.content()\n .addCallback(\n lambda body: messages.CertificateResource(\n uri=cls._maybe_location(response),\n cert_chain_uri=cert_chain_uri,\n body=body))\n )", "def _unarmor_pem_openssl_private(headers, data, password):\n\n enc_algo = None\n enc_iv_hex = None\n enc_iv = None\n\n if 'DEK-Info' in headers:\n params = headers['DEK-Info']\n if params.find(',') != -1:\n enc_algo, enc_iv_hex = params.strip().split(',')\n else:\n enc_algo = 'RC4'\n\n if not enc_algo:\n return data\n\n if enc_iv_hex:\n enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii'))\n enc_algo = enc_algo.lower()\n\n enc_key_length = {\n 'aes-128-cbc': 16,\n 'aes-128': 16,\n 'aes-192-cbc': 24,\n 'aes-192': 24,\n 'aes-256-cbc': 32,\n 'aes-256': 32,\n 'rc4': 16,\n 'rc4-64': 8,\n 'rc4-40': 5,\n 'rc2-64-cbc': 8,\n 'rc2-40-cbc': 5,\n 'rc2-cbc': 16,\n 'rc2': 16,\n 'des-ede3-cbc': 24,\n 'des-ede3': 24,\n 'des3': 24,\n 'des-ede-cbc': 16,\n 'des-cbc': 8,\n 'des': 8,\n }[enc_algo]\n\n enc_key = hashlib.md5(password + enc_iv[0:8]).digest()\n while enc_key_length > len(enc_key):\n enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest()\n enc_key = enc_key[0:enc_key_length]\n\n enc_algo_name = {\n 'aes-128-cbc': 'aes',\n 'aes-128': 'aes',\n 'aes-192-cbc': 'aes',\n 
'aes-192': 'aes',\n 'aes-256-cbc': 'aes',\n 'aes-256': 'aes',\n 'rc4': 'rc4',\n 'rc4-64': 'rc4',\n 'rc4-40': 'rc4',\n 'rc2-64-cbc': 'rc2',\n 'rc2-40-cbc': 'rc2',\n 'rc2-cbc': 'rc2',\n 'rc2': 'rc2',\n 'des-ede3-cbc': 'tripledes',\n 'des-ede3': 'tripledes',\n 'des3': 'tripledes',\n 'des-ede-cbc': 'tripledes',\n 'des-cbc': 'des',\n 'des': 'des',\n }[enc_algo]\n decrypt_func = crypto_funcs[enc_algo_name]\n\n if enc_algo_name == 'rc4':\n return decrypt_func(enc_key, data)\n\n return decrypt_func(enc_key, data, enc_iv)", "def extract_ca_private_key_bytes_from_pem(pem_content):\n found_marker = False\n for begin_marker in [constants.BEGIN_PRIVATE_KEY_MARKER,\n constants.BEGIN_RSA_PRIVATE_KEY_MARKER]:\n begin_search = pem_content.find(begin_marker)\n if begin_search >= 0:\n found_marker = True\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n found_marker = False\n for end_marker in [constants.END_PRIVATE_KEY_MARKER,\n constants.END_RSA_PRIVATE_KEY_MARKER]:\n end_search = pem_content.find(end_marker)\n if end_search >= 0:\n found_marker = True\n end_search += len(end_marker)\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n base64_key = base64.encode_as_text(pem_content[begin_search:end_search])\n return base64_key", "def parse_from_bytes(self, raw_buffer):\n\n try:\n (cpu_svn,\n self.misc_select,\n _,\n attributes,\n mr_enclave,\n _,\n mr_signer,\n _,\n self.isv_prod_id,\n self.isv_svn,\n _,\n report_data) = \\\n struct.unpack(self._format, raw_buffer)\n\n # Further parse embedded structures\n self.cpu_svn.parse_from_bytes(cpu_svn)\n self.attributes.parse_from_bytes(attributes)\n self.mr_enclave.parse_from_bytes(mr_enclave)\n self.mr_signer.parse_from_bytes(mr_signer)\n self.report_data.parse_from_bytes(report_data)\n except struct.error as se:\n raise ValueError('Unable to parse: {}'.format(se))", "def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid private key encoding\")\n\n return decoded_key[\"privateKey\"]", "def mk_keyobj_from_private_key_der(self, derdat):\n self.private_key_obj = serialization.load_der_private_key(derdat, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def ft_seal_and_unseal():\n print \"generating key pair\"\n pubkey_pem, privkey_pem = api.generate_key_pair( 4096 )\n \n sealed_buf = create_sealed_and_signed_blob( privkey_pem, \"foo\", \"hello world\")\n print \"sealed data is:\\n\\n%s\\n\\n\" % sealed_buf\n\n buf = verify_and_unseal_blob( pubkey_pem, \"foo\", sealed_buf )\n print \"unsealed data is: \\n\\n%s\\n\\n\" % buf", "def Read(key):\n dsa = json.loads(key)\n pub = DsaPublicKey.Read(json.dumps(dsa['publicKey']))\n params = { 'x' : util.Decode(dsa['x']) }\n key = DSA.construct((util.BytesToLong(pub._params['y']),\n util.BytesToLong(pub._params['g']),\n util.BytesToLong(pub._params['p']),\n util.BytesToLong(pub._params['q']),\n util.BytesToLong(params['x'])))\n return DsaPrivateKey(params, pub, key, dsa['size'])", "def extract_pkginfo(package):\n with tarfile.open(package, mode='r|*', encoding='utf-8') as tar:\n # Manual seeking to find .PKGINFO without having to uncompress the whole package\n while True:\n f = tar.next()\n if f.name == '.PKGINFO':\n break\n pkginfo = tar.extractfile(f).readlines()\n # Parse .PKGINFO\n res = 
dict()\n for line in pkginfo:\n m = re.match(r'([^=]*) = (.*)', line.decode('utf8'))\n if m:\n # TODO: support multi-valued attributes\n key, value = m[1], m[2].strip()\n res[key] = value\n return res", "def fetch_cert(source, entry, s3_client):\n if source == \"s3\":\n bucket_and_key = parse_s3_url(entry)\n logger.info(\"...reading s3 source = {}\".format(bucket_and_key))\n pem_cert = s3_client.get_object(\n Bucket=bucket_and_key[\"bucket\"], Key=bucket_and_key[\"key\"]\n )\n pem_cert_body = pem_cert[\"Body\"].read()\n elif source == \"memory\":\n logger.info(\"...reading from memory\")\n pem_cert_body = entry\n else:\n raise ValueError(\n \"Invalid cert entry type {}, \" \"must be one of s3, memory\".format(source)\n )\n\n # Python3 will return a byte string, Python2 will return a string\n if type(pem_cert_body) == bytes:\n pem_cert_body = pem_cert_body.decode(\"utf-8\")\n\n return pem_cert_body", "def dePem(s, name):\r\n prefix = \"-----BEGIN %s-----\" % name\r\n postfix = \"-----END %s-----\" % name \r\n start = s.find(prefix)\r\n if start == -1:\r\n raise SyntaxError(\"Missing PEM prefix\")\r\n end = s.find(postfix, start+len(prefix))\r\n if end == -1:\r\n raise SyntaxError(\"Missing PEM postfix\")\r\n s = s[start+len(\"-----BEGIN %s-----\" % name) : end]\r\n retBytes = a2b_base64(s) # May raise SyntaxError\r\n return retBytes", "def test_dsa_ca(self):\n key = c.KEY_DSA\n usage = [\n c.KU_DIGITALSIGNATURE,\n c.KU_NONREPUDIATION,\n c.KU_KEYCERTSIGN,\n c.KU_CRLSIGN,\n ]\n self.assertTrue(utils.check_key_usage(key, usage, True))", "def processResponse(token, enc_key, sig_key):\n payload = []\n # Decrypt encrypted token (JWE).\n enc = jwe.JWE()\n enc.deserialize(token, key=enc_key)\n payload.append(enc.payload.decode(\"utf-8\"))\n # This again contains a signed token (JWS), so we deserialize it and verify the signature.\n sig = jws.JWS()\n sig.deserialize(payload[0])\n sig.verify(sig_key)\n payload.append(sig.payload.decode(\"utf-8\"))\n return payload", "def extractPublicKey(cert):\n pk = cert.get_pubkey()\n\n b = _util.binding\n l = b.lib\n ffi = b.ffi\n rsa = l.EVP_PKEY_get1_RSA(pk._pkey)\n buf = ffi.new(\"unsigned char **\")\n length = l.i2d_RSA_PUBKEY(rsa, buf)\n pk = ffi.buffer(buf[0], length)[:]\n ffi.gc(buf[0], l.OPENSSL_free)\n return pk", "def parse_response(xml):\n r = {}\n\n try:\n xml = etree.fromstring(xml)\n for key in xml.keys():\n value = xml.get(key)\n r.update({key:value})\n except etree.Error as e:\n raise XMLParsingError(u'Failed to parse response from CardPay service: {}'.format(e))\n\n return r", "def decode_csr(self, pem_csr):\n pem_csr = pem_csr.encode(encoding='UTF-8')\n return x509.load_pem_x509_csr(pem_csr, default_backend())", "def verify_cert(public_key, cert):\n try:\n public_key.verify(\n signature=cert.signature,\n data=cert.tbs_certificate_bytes,\n signature_algorithm=ec.ECDSA(cert.signature_hash_algorithm)\n )\n except:\n return 'failure'\n\n return 'success'" ]
[ "0.60229003", "0.5695678", "0.5526086", "0.54503", "0.5424802", "0.51653993", "0.51222205", "0.5089913", "0.5076887", "0.50647706", "0.5058496", "0.4928613", "0.4890625", "0.48888293", "0.48255894", "0.47993195", "0.47745132", "0.47668105", "0.47628716", "0.47628716", "0.47595042", "0.47550887", "0.47479302", "0.47212827", "0.46697128", "0.46628034", "0.46335277", "0.45957088", "0.45930824", "0.4584873", "0.45831445", "0.45600796", "0.4558702", "0.4555767", "0.45496717", "0.45361418", "0.4534125", "0.45154527", "0.4507464", "0.45073995", "0.44964185", "0.4491238", "0.44900322", "0.44723415", "0.44629943", "0.4459555", "0.445782", "0.4441848", "0.44270098", "0.44268104", "0.44217983", "0.4421062", "0.44061783", "0.44038355", "0.43948242", "0.43936536", "0.43884718", "0.43873525", "0.438192", "0.4373917", "0.43717223", "0.43673164", "0.4360474", "0.435417", "0.43480092", "0.434758", "0.4331975", "0.43218398", "0.43211603", "0.4319966", "0.4313969", "0.43047103", "0.4303277", "0.42803288", "0.4279195", "0.4278076", "0.42673117", "0.4266044", "0.42644078", "0.4252087", "0.42477497", "0.42403412", "0.42400464", "0.42383456", "0.42351124", "0.4235017", "0.42185834", "0.4218086", "0.42165548", "0.4207639", "0.42070463", "0.42012912", "0.42005432", "0.41970053", "0.4193446", "0.4184861", "0.41848347", "0.4180863", "0.4180203", "0.41772497" ]
0.67407256
0
Parses a SafeContents PKCS12 ASN.1 structure and extracts certs and keys
def _parse_safe_contents(safe_contents, certs, private_keys, password, load_private_key):
    if isinstance(safe_contents, byte_cls):
        safe_contents = SafeContents.load(safe_contents)

    for safe_bag in safe_contents:
        bag_value = safe_bag['bag_value']

        if isinstance(bag_value, CertBag):
            if bag_value['cert_id'].native == 'x509':
                cert = bag_value['cert_value'].parsed
                public_key_info = cert['tbs_certificate']['subject_public_key_info']
                certs[_fingerprint(public_key_info, None)] = bag_value['cert_value'].parsed

        elif isinstance(bag_value, PrivateKeyInfo):
            private_keys[_fingerprint(bag_value, load_private_key)] = bag_value

        elif isinstance(bag_value, EncryptedPrivateKeyInfo):
            encryption_algorithm_info = bag_value['encryption_algorithm']
            encrypted_key_bytes = bag_value['encrypted_data'].native
            decrypted_key_bytes = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_key_bytes, password)
            private_key = PrivateKeyInfo.load(decrypted_key_bytes)
            private_keys[_fingerprint(private_key, load_private_key)] = private_key

        elif isinstance(bag_value, SafeContents):
            _parse_safe_contents(bag_value, certs, private_keys, password, load_private_key)

        else:
            # We don't care about CRL bags or secret bags
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_pkcs12(data, password, load_private_key):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n certs = {}\n private_keys = {}\n\n pfx = Pfx.load(data)\n\n auth_safe = pfx['auth_safe']\n if auth_safe['content_type'].native != 'data':\n raise ValueError(pretty_message(\n '''\n Only password-protected PKCS12 files are currently supported\n '''\n ))\n authenticated_safe = pfx.authenticated_safe\n\n mac_data = pfx['mac_data']\n if mac_data:\n mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native\n key_length = {\n 'sha1': 20,\n 'sha224': 28,\n 'sha256': 32,\n 'sha384': 48,\n 'sha512': 64,\n 'sha512_224': 28,\n 'sha512_256': 32,\n }[mac_algo]\n mac_key = pkcs12_kdf(\n mac_algo,\n password,\n mac_data['mac_salt'].native,\n mac_data['iterations'].native,\n key_length,\n 3 # ID 3 is for generating an HMAC key\n )\n hash_mod = getattr(hashlib, mac_algo)\n computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest()\n stored_hmac = mac_data['mac']['digest'].native\n if not constant_compare(computed_hmac, stored_hmac):\n raise ValueError('Password provided is invalid')\n\n for content_info in authenticated_safe:\n content = content_info['content']\n\n if isinstance(content, OctetString):\n _parse_safe_contents(content.native, certs, private_keys, password, load_private_key)\n\n elif isinstance(content, EncryptedData):\n encrypted_content_info = content['encrypted_content_info']\n\n encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm']\n encrypted_content = encrypted_content_info['encrypted_content'].native\n decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)\n\n _parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key)\n\n else:\n raise ValueError(pretty_message(\n '''\n Public-key-based PKCS12 files are not currently supported\n '''\n ))\n\n key_fingerprints = set(private_keys.keys())\n cert_fingerprints = set(certs.keys())\n\n common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))\n\n key = None\n cert = None\n other_certs = []\n\n if len(common_fingerprints) >= 1:\n fingerprint = common_fingerprints[0]\n key = private_keys[fingerprint]\n cert = certs[fingerprint]\n other_certs = [certs[f] for f in certs if f != fingerprint]\n return (key, cert, other_certs)\n\n if len(private_keys) > 0:\n first_key = sorted(list(private_keys.keys()))[0]\n key = private_keys[first_key]\n\n if len(certs) > 0:\n first_key = sorted(list(certs.keys()))[0]\n cert = certs[first_key]\n del certs[first_key]\n\n if len(certs) > 0:\n other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly)\n\n return (key, cert, other_certs)", "def parse_config(self, data):\n match = re.search(\"-----BEGIN RSA PRIVATE KEY-----.*\" + \\\n \"-----END RSA PRIVATE KEY-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Private key not found\")\n key = match.group()\n\n match = re.search(\"-----BEGIN CERTIFICATE-----.*\" + \\\n \"-----END CERTIFICATE-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Certificate not found\")\n cert = match.group()\n # config also contains allowed, dns, 
but we don't use that for GCMU\n return (cert, key)", "def verify(self, ta):\n\n try:\n cms = self.get_POW()\n except:\n if self.print_on_der_error:\n logger.debug(\"Problem parsing DER CMS message, might not really be DER: %r\",\n self.get_DER())\n raise rpki.exceptions.UnparsableCMSDER\n\n if cms.eContentType() != self.econtent_oid:\n raise rpki.exceptions.WrongEContentType(\"Got CMS eContentType %s, expected %s\" % (\n cms.eContentType(), self.econtent_oid))\n\n certs = [X509(POW = x) for x in cms.certs()]\n crls = [CRL(POW = c) for c in cms.crls()]\n\n if self.debug_cms_certs:\n for x in certs:\n logger.debug(\"Received CMS cert issuer %s subject %s SKI %s\",\n x.getIssuer(), x.getSubject(), x.hSKI())\n for c in crls:\n logger.debug(\"Received CMS CRL issuer %r\", c.getIssuer())\n\n now = rpki.sundial.now()\n\n trusted_ee = None\n trusted_ca = []\n untrusted_ee = None\n\n for x in X509.normalize_chain(ta):\n if self.debug_cms_certs:\n logger.debug(\"CMS trusted cert issuer %s subject %s SKI %s\",\n x.getIssuer(), x.getSubject(), x.hSKI())\n if x.getNotAfter() < now:\n raise rpki.exceptions.TrustedCMSCertHasExpired(\"Trusted CMS certificate has expired\",\n \"%s (%s)\" % (x.getSubject(), x.hSKI()))\n if x.is_CA():\n trusted_ca.append(x)\n else:\n if trusted_ee is None:\n trusted_ee = x\n else:\n raise rpki.exceptions.MultipleCMSEECert(\"Multiple CMS EE certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in ta if not x.is_CA()))\n\n if trusted_ee:\n if self.debug_cms_certs:\n logger.debug(\"Trusted CMS EE cert issuer %s subject %s SKI %s\",\n trusted_ee.getIssuer(), trusted_ee.getSubject(), trusted_ee.hSKI())\n if len(certs) > 1 or (len(certs) == 1 and\n (certs[0].getSubject() != trusted_ee.getSubject() or\n certs[0].getPublicKey() != trusted_ee.getPublicKey())):\n raise rpki.exceptions.UnexpectedCMSCerts(\"Unexpected CMS certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in certs))\n if crls:\n raise rpki.exceptions.UnexpectedCMSCRLs(\"Unexpected CRLs\", *(\"%s (%s)\" % (\n c.getIssuer(), c.hAKI()) for c in crls))\n\n else:\n untrusted_ee = [x for x in certs if not x.is_CA()]\n if len(untrusted_ee) < 1:\n raise rpki.exceptions.MissingCMSEEcert\n if len(untrusted_ee) > 1 or (not self.allow_extra_certs and len(certs) > len(untrusted_ee)):\n raise rpki.exceptions.UnexpectedCMSCerts(\"Unexpected CMS certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in certs))\n untrusted_ee = untrusted_ee[0]\n if len(crls) < 1:\n if self.require_crls:\n raise rpki.exceptions.MissingCMSCRL\n else:\n logger.warning(\"MISSING CMS CRL! Ignoring per self.require_crls setting\")\n if len(crls) > 1 and not self.allow_extra_crls:\n raise rpki.exceptions.UnexpectedCMSCRLs(\"Unexpected CRLs\", *(\"%s (%s)\" % (\n c.getIssuer(), c.hAKI()) for c in crls))\n\n for x in certs:\n if x.getNotAfter() < now:\n raise rpki.exceptions.CMSCertHasExpired(\"CMS certificate has expired\", \"%s (%s)\" % (\n x.getSubject(), x.hSKI()))\n\n for c in crls:\n if c.getNextUpdate() < now:\n logger.warning(\"Stale BPKI CMS CRL (%s %s %s)\", c.getNextUpdate(), c.getIssuer(), c.hAKI())\n\n # XXX Verify certificate chain via X.509 machinery, not CMS\n # machinery. 
Awful mess due to history, needs cleanup, but\n # get it working again first.\n\n cert = (trusted_ee or untrusted_ee).get_POW()\n\n cert.verify(trusted = (x.get_POW() for x in trusted_ca),\n crl = crls[0].get_POW() if untrusted_ee and crls else None)\n\n try:\n # XXX This isn't right yet, but let's test before gettting more complicated\n #\n # Aside from all the type and exception abominations, the\n # main problem here is that we're no longer verifying the\n # certificate chain, just the CMS signature. Certificate\n # verificaiton is a separate step under the new scheme,\n # and probably comes before this, but let's write down\n # what the problem is before it gets lost...\n\n content = cms.verify(certs = (x.get_POW() for x in X509.normalize_chain(ta)),\n flags = rpki.POW.CMS_NO_SIGNER_CERT_VERIFY)\n except:\n if self.dump_on_verify_failure:\n if self.dump_using_dumpasn1:\n dbg = self.dumpasn1()\n else:\n dbg = cms.pprint()\n logger.warning(\"CMS verification failed, dumping ASN.1 (%d octets):\", len(self.get_DER()))\n for line in dbg.splitlines():\n logger.warning(line)\n\n # XXX Old code replaced rpki.POW exception with this. For\n # debugging I'd rather see what POW has to say; decide\n # later whether to keep this change.\n #\n #raise rpki.exceptions.CMSVerificationFailed(\"CMS verification failed\")\n raise\n\n return content", "def parseBinary(self, bytes):\r\n\r\n self.bytes = bytearray(bytes)\r\n p = ASN1Parser(bytes)\r\n\r\n #Get the tbsCertificate\r\n tbsCertificateP = p.getChild(0)\r\n\r\n #Is the optional version field present?\r\n #This determines which index the key is at.\r\n if tbsCertificateP.value[0]==0xA0:\r\n subjectPublicKeyInfoIndex = 6\r\n else:\r\n subjectPublicKeyInfoIndex = 5\r\n\r\n #Get the subject\r\n self.subject = tbsCertificateP.getChildBytes(\\\r\n subjectPublicKeyInfoIndex - 1)\r\n\r\n #Get the subjectPublicKeyInfo\r\n subjectPublicKeyInfoP = tbsCertificateP.getChild(\\\r\n subjectPublicKeyInfoIndex)\r\n\r\n #Get the algorithm\r\n algorithmP = subjectPublicKeyInfoP.getChild(0)\r\n rsaOID = algorithmP.value\r\n if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:\r\n raise SyntaxError(\"Unrecognized AlgorithmIdentifier\")\r\n\r\n #Get the subjectPublicKey\r\n subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1)\r\n\r\n #Adjust for BIT STRING encapsulation\r\n if (subjectPublicKeyP.value[0] !=0):\r\n raise SyntaxError()\r\n subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:])\r\n\r\n #Get the modulus and exponent\r\n modulusP = subjectPublicKeyP.getChild(0)\r\n publicExponentP = subjectPublicKeyP.getChild(1)\r\n\r\n #Decode them into numbers\r\n n = bytesToNumber(modulusP.value)\r\n e = bytesToNumber(publicExponentP.value)\r\n\r\n #Create a public key instance\r\n self.publicKey = _createPublicRSAKey(n, e)", "def parsePemList(self, s):\r\n x509List = []\r\n bList = dePemList(s, \"CERTIFICATE\")\r\n for b in bList:\r\n x509 = X509()\r\n x509.parseBinary(b)\r\n x509List.append(x509)\r\n self.x509List = x509List", "def ParsePkgInfo(contents, filename, valid_keys=None, required_keys=None):\n rtn = {}\n if valid_keys is None:\n valid_keys = VALID_KEYS\n if required_keys is None:\n required_keys = REQUIRED_KEYS\n\n def ParsePkgInfoLine(line, line_no):\n if '=' not in line:\n raise PkgFormatError('Invalid info line %s:%d' % (filename, line_no))\n key, value = line.split('=', 1)\n key = key.strip()\n if key not in valid_keys:\n raise PkgFormatError(\"Invalid key '%s' in info file %s:%d\" %\n (key, filename, line_no))\n value = 
value.strip()\n if value[0] == '(':\n if value[-1] != ')':\n raise PkgFormatError('Error parsing %s:%d: %s (%s)' %\n (filename, line_no, key, value))\n value = value[1:-1].split()\n else:\n value = shlex.split(value)[0]\n return (key, value)\n\n def ExpandVars(value, substitutions):\n if type(value) == str:\n return string.Template(value).substitute(substitutions)\n else:\n return [string.Template(v).substitute(substitutions) for v in value]\n\n for i, line in enumerate(contents.splitlines()):\n if not line or line[0] == '#':\n continue\n key, raw_value = ParsePkgInfoLine(line, i + 1)\n if key in rtn:\n raise PkgFormatError('Error parsing %s:%d: duplicate key (%s)' %\n (filename, i + 1, key))\n rtn[key] = ExpandVars(raw_value, rtn)\n\n for required_key in required_keys:\n if required_key not in rtn:\n raise PkgFormatError(\"Required key '%s' missing from info file: '%s'\" %\n (required_key, filename))\n\n return rtn", "def _extract_values_from_certificate(cert):\n logger = getLogger(__name__)\n # cert and serial number\n data = {\n u'cert': cert,\n u'issuer': cert.get_issuer().der(),\n u'serial_number': cert.get_serial_number(),\n u'algorithm': rfc2437.id_sha1,\n u'algorithm_parameter': univ.Any(hexValue='0500') # magic number\n }\n # DN Hash\n data[u'name'] = cert.get_subject()\n cert_der = data[u'name'].der()\n sha1_hash = hashlib.sha1()\n sha1_hash.update(cert_der)\n data[u'name_hash'] = sha1_hash.hexdigest()\n\n # public key Hash\n data['key_hash'] = _get_pubickey_sha1_hash(cert).hexdigest()\n\n # CRL and OCSP\n data['crl'] = None\n ocsp_uris0 = []\n for idx in range(cert.get_extension_count()):\n e = cert.get_extension(idx)\n if e.get_short_name() == b'authorityInfoAccess':\n for line in str(e).split(u\"\\n\"):\n m = OCSP_RE.match(line)\n if m:\n logger.debug(u'OCSP URL: %s', m.group(1))\n ocsp_uris0.append(m.group(1))\n elif e.get_short_name() == b'crlDistributionPoints':\n for line in str(e).split(u\"\\n\"):\n m = CRL_RE.match(line)\n if m:\n logger.debug(u\"CRL: %s\", m.group(1))\n data['crl'] = m.group(1)\n\n if len(ocsp_uris0) == 1:\n data['ocsp_uri'] = ocsp_uris0[0]\n elif len(ocsp_uris0) == 0:\n data['ocsp_uri'] = u''\n else:\n raise OperationalError(\n msg=u'More than one OCSP URI entries are specified in '\n u'the certificate',\n errno=ER_FAILED_TO_GET_OCSP_URI,\n )\n data[u'is_root_ca'] = cert.get_subject() == cert.get_issuer()\n return data", "def parse(self, xml_text):\n xml_doc = parse_doc(xml_text)\n data = findtext(xml_doc, \"Data\")\n if data is None:\n return\n\n cryptutil = CryptUtil(conf.get_openssl_cmd())\n p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME)\n p7m = (\"MIME-Version:1.0\\n\"\n \"Content-Disposition: attachment; filename=\\\"{0}\\\"\\n\"\n \"Content-Type: application/x-pkcs7-mime; name=\\\"{1}\\\"\\n\"\n \"Content-Transfer-Encoding: base64\\n\"\n \"\\n\"\n \"{2}\").format(p7m_file, p7m_file, data)\n\n self.client.save_cache(p7m_file, p7m)\n\n trans_prv_file = os.path.join(conf.get_lib_dir(),\n TRANSPORT_PRV_FILE_NAME)\n trans_cert_file = os.path.join(conf.get_lib_dir(),\n TRANSPORT_CERT_FILE_NAME)\n pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME)\n # decrypt certificates\n cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file,\n pem_file)\n\n # The parsing process use public key to match prv and crt.\n buf = []\n begin_crt = False\n begin_prv = False\n prvs = {}\n thumbprints = {}\n index = 0\n v1_cert_list = []\n with open(pem_file) as pem:\n for line in pem.readlines():\n buf.append(line)\n if 
re.match(r'[-]+BEGIN.*KEY[-]+', line):\n begin_prv = True\n elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line):\n begin_crt = True\n elif re.match(r'[-]+END.*KEY[-]+', line):\n tmp_file = self.write_to_tmp_file(index, 'prv', buf)\n pub = cryptutil.get_pubkey_from_prv(tmp_file)\n prvs[pub] = tmp_file\n buf = []\n index += 1\n begin_prv = False\n elif re.match(r'[-]+END.*CERTIFICATE[-]+', line):\n tmp_file = self.write_to_tmp_file(index, 'crt', buf)\n pub = cryptutil.get_pubkey_from_crt(tmp_file)\n thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file)\n thumbprints[pub] = thumbprint\n # Rename crt with thumbprint as the file name\n crt = \"{0}.crt\".format(thumbprint)\n v1_cert_list.append({\n \"name\": None,\n \"thumbprint\": thumbprint\n })\n os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt))\n buf = []\n index += 1\n begin_crt = False\n\n # Rename prv key with thumbprint as the file name\n for pubkey in prvs:\n thumbprint = thumbprints[pubkey]\n if thumbprint:\n tmp_file = prvs[pubkey]\n prv = \"{0}.prv\".format(thumbprint)\n os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv))\n\n for v1_cert in v1_cert_list:\n cert = Cert()\n set_properties(\"certs\", cert, v1_cert)\n self.cert_list.certificates.append(cert)", "def _parse(self, content):\n result = TincConfParser.conf_file.parseString(to_unicode(content))\n for entry in result.get(\"entries\", []):\n self[entry[0]] = entry[1]\n keys = result.get(\"keys\", [])\n if keys:\n if len(keys) > 1:\n raise ParserError(\"Hostfile specifies more than one public key!\")\n self.rsa_public_key = '\\n'.join(keys[0])\n old_keys = result.get(\"old_keys\", [])\n for old_key in old_keys:\n self.old_public_keys.append('\\n'.join(old_key))", "def extract_certs_from_pem(pem_contents):\n start = 0\n certs = []\n while True:\n index = pem_contents.find(constants.BEGIN_CERTIFICATE_MARKER, start)\n if index == -1:\n break\n try:\n cert = x509.load_pem_x509_certificate(pem_contents[index::],\n default_backend())\n except Exception:\n LOG.exception(_(\"Load pem x509 certificate failed at file \"\n \"location: %s\") % index)\n raise exception.SysinvException(_(\n \"Failed to load pem x509 certificate\"))\n\n certs.append(cert)\n start = index + len(constants.BEGIN_CERTIFICATE_MARKER)\n return certs", "def parse(self, s):\r\n\r\n bytes = dePem(s, \"CERTIFICATE\")\r\n self.parseBinary(bytes)\r\n return self", "def extract(self):\n\n try:\n cms = self.get_POW()\n except:\n raise rpki.exceptions.UnparsableCMSDER\n\n if cms.eContentType() != self.econtent_oid:\n raise rpki.exceptions.WrongEContentType(\"Got CMS eContentType %s, expected %s\" % (\n cms.eContentType(), self.econtent_oid))\n\n return cms.verify(flags = (rpki.POW.CMS_NOCRL | rpki.POW.CMS_NO_SIGNER_CERT_VERIFY |\n rpki.POW.CMS_NO_ATTR_VERIFY | rpki.POW.CMS_NO_CONTENT_VERIFY))", "def deserialize(self, data):\n assert self._cert_store is not None\n try:\n data = self._deserialize(data)\n signature = b64decode(data[\"signature\"])\n signer = data[\"signer\"]\n data = data[\"data\"]\n self._cert_store[signer].verify(data, signature, self._digest)\n return self._deserialize(data)\n except Exception, exc:\n raise SecurityError(\"Unable to deserialize: %r\" % (exc, ))", "def info(self, fp):\n keys = (\n (\"cas.meta.compression\", CAS._convert_meta),\n (\"cas.meta.lib\", CAS._convert_meta),\n (\"cas.meta.fp_algo\", CAS._convert_meta),\n (\"cas.meta.orig_size\", CAS._convert_meta),\n (\"cas.refcount\", CAS._convert_refcount),\n )\n\n return {key: conv(self.ioctx.get_xattr(fp, key))\n for key, conv in 
keys}", "def test_pkcs12_ordering():\n\n def make_cert(name):\n key = ec.generate_private_key(ec.SECP256R1())\n subject = x509.Name(\n [\n x509.NameAttribute(x509.NameOID.COMMON_NAME, name),\n ]\n )\n now = datetime.utcnow()\n cert = (\n x509.CertificateBuilder()\n .subject_name(subject)\n .issuer_name(subject)\n .public_key(key.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(now)\n .not_valid_after(now)\n .sign(key, hashes.SHA256())\n )\n return (key, cert)\n\n # Make some certificates with distinct names.\n a_name = \"A\" * 20\n b_name = \"B\" * 20\n c_name = \"C\" * 20\n a_key, a_cert = make_cert(a_name)\n _, b_cert = make_cert(b_name)\n _, c_cert = make_cert(c_name)\n\n # Bundle them in a PKCS#12 file in order A, B, C.\n p12 = serialize_key_and_certificates(\n b\"p12\", a_key, a_cert, [b_cert, c_cert], serialization.NoEncryption()\n )\n\n # Parse them out. The API should report them in the same order.\n (key, cert, certs) = load_key_and_certificates(p12, None)\n assert cert == a_cert\n assert certs == [b_cert, c_cert]\n\n # The ordering in the PKCS#12 file itself should also match.\n a_idx = p12.index(a_name.encode(\"utf-8\"))\n b_idx = p12.index(b_name.encode(\"utf-8\"))\n c_idx = p12.index(c_name.encode(\"utf-8\"))\n\n assert a_idx < b_idx < c_idx", "def unpack_keys_from_xfer(key_pack_hex: hex,\n path=paths.nacl_keys,\n *args,\n **kwargs):\n global public_box\n\n try:\n key_dict = public_box.decrypt(key_pack_hex)\n key_dict = json.loads(key_dict)\n\n aes_key = key_dict[\"aes\"]\n AES256Cipher().write_key(aes_key.encode())\n\n fernet_key = key_dict[\"fernet\"]\n FernetCipher().write_key(fernet_key.encode())\n\n chacha_key = key_dict[\"chacha\"]\n XChaCha20Poly1305.write_key(Base64Encoder.decode(chacha_key))\n\n except:\n print(sysMsgList.keysUnpackFail)", "def get_cert_content(certificate):\n cert_object = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)\n cert_content = crypto.dump_certificate(crypto.FILETYPE_TEXT, cert_object)\n return cert_content", "def info_from_args(args):\n return CertInfo(\n subject=parse_dn(args.subject),\n usage=parse_list(args.usage),\n alt_names=parse_list(args.san),\n ocsp_nocheck=args.ocsp_nocheck,\n ocsp_must_staple=args.ocsp_must_staple,\n ocsp_must_staple_v2=args.ocsp_must_staple_v2,\n ocsp_urls=parse_list(args.ocsp_urls),\n crl_urls=parse_list(args.crl_urls),\n issuer_urls=parse_list(args.issuer_urls),\n permit_subtrees=parse_list(args.permit_subtrees),\n exclude_subtrees=parse_list(args.exclude_subtrees),\n ca=args.CA,\n path_length=args.path_length)", "def get_certinfo(doc):\n\n #set a two second default timeout to recieve a cert\n socket.setdefaulttimeout(2)\n doc['ssl'] = {} \n\n try:\n cert = ssl.get_server_certificate((doc['hostname'], 443))\n #sometimes certs come back as unicode so cast to str() aka ascii\n cert = M2Crypto.X509.load_cert_string(str(cert))\n\n except:\n syslog.syslog('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n print('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n #lets remove the ssl key and return the doc untouched\n doc.pop('ssl')\n return doc\n\n\n #get creation date\n doc['ssl']['created'] = cert.get_not_before().get_datetime().isoformat()\n #get not valid after, aka expiration data\n doc['ssl']['expire'] = cert.get_not_after().get_datetime().isoformat()\n #get issuer information\n doc['ssl']['issuer'] = cert.get_issuer().as_text()\n #get subject information\n doc['ssl']['subject'] = cert.get_subject().as_text()\n #get keysize, size() returns in 
bytes, so we multiply * 8 to get the number of bits\n doc['ssl']['keysize'] = cert.get_pubkey().size() * 8\n #get cert fingerprint for comparison\n doc['ssl']['fingerprint'] = cert.get_fingerprint()\n\n return doc", "def asn1_loads(asn1_str):\n\n # ASN.1 grammar\n identifier = pp.Word(pp.alphas + \"_\")\n assign = pp.Literal(\"::=\")\n # typedef = identifier.setName(\"typeref\") + assign + identifier.setName(\"basetype\")\n comment1 = pp.Literal(\"#\") + pp.originalTextFor(pp.SkipTo(pp.LineEnd()))\n # typelist = pp.OneOrMore(typedef)\n meta1 = pp.LineStart() + identifier + pp.Literal(\":\") + pp.SkipTo(pp.LineEnd()).setDebug()\n meta2 = pp.LineStart() + pp.White() + pp.SkipTo(pp.LineEnd()).setDebug()\n metaval = meta1 + pp.ZeroOrMore(meta2)\n # metalist = pp.ZeroOrMore(comment1) + pp.Literal(\"/*\") + pp.OneOrMore(metaval) + pp.Literal(\"*/\")\n metalist = pp.SkipTo(pp.Literal(\"/*\")).setDebug() + pp.Literal(\"/*\") + pp.OneOrMore(\n metaval).setDebug() + pp.Literal(\"*/\")\n\n asn1 = metalist.parseString(asn1_str, parseAll=False)\n print(asn1)\n jaen = {\"meta\": {}, \"types\": []}\n return jaen", "def parse(obj):\n data = json.loads(obj)\n cryptopars = init_crypto_args(**data)\n return cryptopars\n # return cipherdata(cryptopars, **data)", "def verify(self, ta):\n\n self.decode(CMS_object.verify(self, ta))\n return self.get_content()", "def _check_certificate(public_cert_content, priv_key_content,\n domain=None, at_time=None):\n result = {}\n # Read the private key and public certificate\n try:\n priv_key = OpenSSL.crypto.load_privatekey(\n OpenSSL.crypto.FILETYPE_PEM, priv_key_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate_key': {\n 'state': 'invalid', 'detail': str(err)}})\n priv_key = None\n\n try:\n public_cert = OpenSSL.crypto.load_certificate(\n OpenSSL.crypto.FILETYPE_PEM, public_cert_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate': {\n 'state': 'invalid', 'detail': str(err)}})\n public_cert = None\n\n if priv_key and public_cert:\n context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)\n context.use_privatekey(priv_key)\n context.use_certificate(public_cert)\n try:\n context.check_privatekey()\n except OpenSSL.SSL.Error:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate does not match private key.\"}})\n\n if result:\n raise RuntimeError(result)\n\n not_after = public_cert.get_notAfter()\n if not isinstance(not_after, six.string_types):\n not_after = not_after.decode('utf-8')\n not_after = datetime.datetime.strptime(not_after, \"%Y%m%d%H%M%SZ\")\n common_name = public_cert.get_subject().commonName\n alt_names = []\n for ext_idx in range(0, public_cert.get_extension_count()):\n extension = public_cert.get_extension(ext_idx)\n if extension.get_short_name().decode('utf-8') == 'subjectAltName':\n # data of the X509 extension, encoded as ASN.1\n decoded_alt_names, _ = asn1_decoder(\n extension.get_data(), asn1Spec=SubjectAltName())\n for alt in nat_encoder(decoded_alt_names):\n alt_name = alt['dNSName'].decode('utf-8')\n if alt_name != common_name:\n alt_names += [alt_name]\n if domain:\n found = False\n for alt_name in [common_name] + alt_names:\n regex = alt_name.replace('.', r'\\.').replace('*', r'.*') + '$'\n if re.match(regex, domain) or alt_name == domain:\n found = True\n break\n if not found:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"domain name (%s) does not match common or alt names\"\\\n \" present in certificate (%s, %s).\" % (\n domain, 
common_name, ','.join(alt_names))}})\n if at_time:\n if not_after <= at_time:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate is only valid until %s.\" % not_after}})\n\n if result:\n raise RuntimeError(result)\n\n result.update({'ssl_certificate': {\n 'common_name': common_name,\n 'alt_names': alt_names,\n 'state': result.get('ssl_certificate', {}).get('state', 'valid'),\n 'issuer': public_cert.get_issuer().organizationName,\n 'ends_at': not_after.isoformat()}})\n return result", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def _ParseCertificateArguments(client, args):\n self_managed = None\n managed = None\n certificate_type = None\n if args.certificate:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.SELF_MANAGED\n certificate = files.ReadFileContents(args.certificate)\n private_key = files.ReadFileContents(args.private_key)\n self_managed = client.messages.SslCertificateSelfManagedSslCertificate(\n certificate=certificate, privateKey=private_key)\n if args.domains:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.MANAGED\n managed = client.messages.SslCertificateManagedSslCertificate(\n domains=args.domains)\n return certificate_type, self_managed, managed", "def test_rsa_ca(self):\n key = c.KEY_RSA\n usage = [\n c.KU_DIGITALSIGNATURE,\n c.KU_NONREPUDIATION,\n c.KU_KEYENCIPHERMENT,\n c.KU_DATAENCIPHERMENT,\n c.KU_KEYCERTSIGN,\n c.KU_CRLSIGN,\n ]\n self.assertTrue(utils.check_key_usage(key, usage, True))", "def check_valid_request_ca(self):\n\n self.check_valid_request_common()\n\n alg = self.get_POW().getSignatureAlgorithm()\n bc = self.get_POW().getBasicConstraints()\n eku = self.get_POW().getEKU()\n sia = self.get_POW().getSIA()\n\n if alg != rpki.oids.sha256WithRSAEncryption:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 has bad signature algorithm for CA: %s\" % alg)\n\n if bc is None or not bc[0] or bc[1] is not None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA bad basicConstraints\")\n\n if eku is not None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA EKU not allowed\")\n\n if sia is None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA missing\")\n\n caRepository, rpkiManifest, signedObject, rpkiNotify = sia\n\n logger.debug(\"check_valid_request_ca(): sia: %r\", sia)\n\n if signedObject:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must not have id-ad-signedObject\")\n\n if not caRepository:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must have id-ad-caRepository\")\n\n if not any(uri.startswith(\"rsync://\") for uri in caRepository):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-caRepository contains no rsync URIs\")\n\n if any(uri.startswith(\"rsync://\") and not uri.endswith(\"/\") for uri in caRepository):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-caRepository does not end with slash\")\n\n if not rpkiManifest:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must have id-ad-rpkiManifest\")\n\n if not any(uri.startswith(\"rsync://\") for uri in rpkiManifest):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiManifest contains no rsync URIs\")\n\n if any(uri.startswith(\"rsync://\") and uri.endswith(\"/\") for uri in rpkiManifest):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiManifest ends with slash\")\n\n if any(not uri.startswith(\"http://\") and not uri.startswith(\"https://\") for uri in rpkiNotify):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiNotify 
neither HTTP nor HTTPS\")", "def ver_dec_content(parts, sign_key=None, enc_key=None, sign_alg=\"SHA256\"):\n\n if parts is None:\n return None\n elif len(parts) == 3:\n # verify the cookie signature\n timestamp, load, b64_mac = parts\n mac = base64.b64decode(b64_mac)\n verifier = HMACSigner(algorithm=sign_alg)\n if verifier.verify(\n load.encode(\"utf-8\") + timestamp.encode(\"utf-8\"), mac, sign_key.key\n ):\n return load, timestamp\n else:\n raise VerificationError()\n elif len(parts) == 4:\n b_timestamp = parts[0]\n iv = base64.b64decode(parts[1])\n ciphertext = base64.b64decode(parts[2])\n tag = base64.b64decode(parts[3])\n\n decrypter = AES_GCMEncrypter(key=enc_key.key)\n try:\n msg = decrypter.decrypt(ciphertext, iv, tag=tag)\n except InvalidTag:\n return None\n\n p = lv_unpack(msg.decode(\"utf-8\"))\n load = p[0]\n timestamp = p[1]\n if len(p) == 3:\n verifier = HMACSigner(algorithm=sign_alg)\n if verifier.verify(\n load.encode(\"utf-8\") + timestamp.encode(\"utf-8\"),\n base64.b64decode(p[2]),\n sign_key.key,\n ):\n return load, timestamp\n else:\n return load, timestamp\n return None", "def extract (msgfile, key):\n m = email.message_from_file(msgfile)\n From, To, Subject, Date = caption(m)\n #Text, Html, Files, Parts = pullout(m, key)\n Text = Text.strip(); Html = Html.strip()\n msg = {\"subject\": Subject, \"from\": From, \"to\": To, \"date\": Date,\n \"text\": Text, \"html\": Html, \"parts\": Parts}\n if Files: msg[\"files\"] = Files\n return msg", "def fetch_cert(source, entry, s3_client):\n if source == \"s3\":\n bucket_and_key = parse_s3_url(entry)\n logger.info(\"...reading s3 source = {}\".format(bucket_and_key))\n pem_cert = s3_client.get_object(\n Bucket=bucket_and_key[\"bucket\"], Key=bucket_and_key[\"key\"]\n )\n pem_cert_body = pem_cert[\"Body\"].read()\n elif source == \"memory\":\n logger.info(\"...reading from memory\")\n pem_cert_body = entry\n else:\n raise ValueError(\n \"Invalid cert entry type {}, \" \"must be one of s3, memory\".format(source)\n )\n\n # Python3 will return a byte string, Python2 will return a string\n if type(pem_cert_body) == bytes:\n pem_cert_body = pem_cert_body.decode(\"utf-8\")\n\n return pem_cert_body", "def processResponse(token, enc_key, sig_key):\n payload = []\n # Decrypt encrypted token (JWE).\n enc = jwe.JWE()\n enc.deserialize(token, key=enc_key)\n payload.append(enc.payload.decode(\"utf-8\"))\n # This again contains a signed token (JWS), so we deserialize it and verify the signature.\n sig = jws.JWS()\n sig.deserialize(payload[0])\n sig.verify(sig_key)\n payload.append(sig.payload.decode(\"utf-8\"))\n return payload", "def read_snac(snac):\n if snac.endswith(\".snac\"):\n path = snac\n else:\n path = snac + \".snac\"\n d = {}\n with open(path) as f:\n last = None\n for l in f:\n if l.startswith(\"#\"): continue\n if \":\" in l:\n key, val = l.split(\":\")\n last = key.strip()\n val = val.strip()\n else:\n val = l.strip()\n if last:\n t = d.setdefault(last, [])\n t.append(val)\n else:\n print(\"Unexpected input: \", last)\n for a in d:\n d[a] = \" \".join(d[a])\n return d", "def from_bytes(cls, bytes):\n construct = _constructs.Certificate.parse(bytes)\n return cls(\n certificate_list=[\n ASN1Cert(\n asn1_cert=asn1cert.asn1_cert\n )\n for asn1cert in construct.certificate_list],\n )", "def extractParamsFromKey(key: str) -> []:\n l = base64.b64decode(key).decode('ascii')\n \n param1 = l.split('\\n')[0]\n param2 = l.split('\\n')[1]\n #convert back to int\n param1 = int(param1, 16)\n param2 = int(param2, 16)\n \n if args.verbose : 
print(param1,param2)\n return [param1,param2]", "def parse_entry(lines):\n entry = {}\n for line in lines:\n line = line.replace('\\n', '').replace('\\r', '')\n if ':: ' in line:\n (key, value) = line.split(':: ')\n value = base64.b64decode(value).decode('utf-8')\n elif ': ' in line:\n (key, value) = line.split(': ')\n else:\n continue\n if key not in entry:\n entry[key] = []\n entry[key].append(value)\n return entry", "def rsa_pkcs1v15_decrypt(self, data):\n pass", "def Certificate(self) -> _n_8_t_0:", "def Certificate(self) -> _n_8_t_0:", "def extract_pkginfo(package):\n with tarfile.open(package, mode='r|*', encoding='utf-8') as tar:\n # Manual seeking to find .PKGINFO without having to uncompress the whole package\n while True:\n f = tar.next()\n if f.name == '.PKGINFO':\n break\n pkginfo = tar.extractfile(f).readlines()\n # Parse .PKGINFO\n res = dict()\n for line in pkginfo:\n m = re.match(r'([^=]*) = (.*)', line.decode('utf8'))\n if m:\n # TODO: support multi-valued attributes\n key, value = m[1], m[2].strip()\n res[key] = value\n return res", "def get_contents():\n # parse POSTed contents:\n contents = request.get_json()\n\n # get email and password:\n email = contents.get('email', None)\n if email is None:\n raise JWTError(\n {\n 'code': 'invalid_data',\n 'description': 'Missing parameter: email'\n }, \n 400\n )\n\n password = contents.get('password', None)\n if password is None:\n raise JWTError(\n {\n 'code': 'invalid_data',\n 'description': 'Missing parameter: password'\n }, \n 400\n )\n \n # formate:\n contents = {\n 'email': email, \n 'password': password\n }\n\n return contents", "def extract(key, path_pdf):\n\n path_tmp_pdf = extract_first_page(path_pdf)\n\n # extract all text from first page\n raw_text = extract_text(path_tmp_pdf)\n\n # extract abstract from whole page and replace hyphens etc.\n abstract = extract_abstract(raw_text)\n\n # something went wrong when abstract is longer than 1500 chars\n if len(abstract) > MAX_LEN:\n print('{}: Abstract is too long.'.format(path_pdf))\n\n if not abstract:\n print('{}: Could not extract abstract.'.format(path_pdf))\n\n # clean up temp file\n os.unlink(path_tmp_pdf)\n\n # TODO: Fix this return object\n out = {'@key': key, 'abstract': abstract}\n\n return out", "def load_x509_cert(url, httpc, spec2key, **get_args):\n try:\n r = httpc(\"GET\", url, allow_redirects=True, **get_args)\n if r.status_code == 200:\n cert = str(r.text)\n try:\n public_key = spec2key[cert] # If I've already seen it\n except KeyError:\n public_key = import_public_key_from_pem_data(cert)\n spec2key[cert] = public_key\n\n if isinstance(public_key, rsa.RSAPublicKey):\n return {\"rsa\": public_key}\n elif isinstance(public_key, ec.EllipticCurvePublicKey):\n return {\"ec\": public_key}\n else:\n raise Exception(\"HTTP Get error: %s\" % r.status_code)\n except Exception as err: # not a RSA key\n logger.warning(\"Can't load key: %s\" % err)\n return []", "def opensslCmsSignedDataCreate( conveyedInfoFile, cert, privateKey ):\n opensslCmdArgs = [ \"openssl\", \"cms\", \"-sign\", \"-in\", conveyedInfoFile,\n \"-signer\", cert,\n \"-inkey\", privateKey,\n \"-outform\", \"der\", \"-nodetach\" ]\n conveyedInfoCmsSignedDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return conveyedInfoCmsSignedDerBase64", "def parse(self):\n results = {}\n\n # get the signature info via the codesign utility\n args = [\"codesign\",\"-dvvvv\", self.file_name]\n proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, error_output = 
proc.communicate()\n if proc.returncode: #error, probably file not signed\n results[\"signature\"] = error_output\n else:\n results[\"signature\"] = output\n\n #get the file object\n file_object = open(self.file_name, 'rb')\n\n\n #Use the macho library to parse out some structures\n pFile = MachO(self.file_name)\n\n #if this is a fat file, it will have multiple Mach-O objects inside it\n results[\"FAT_header\"] = self.parseFATHeader(file_object, pFile)\n\n #parse all the Mach-O headers\n i = 1\n for h in pFile.headers:\n results[\"MachO_header\" + str(i)] = self.parseMachOHeader(h, file_object)\n i +=1\n\n #close the file\n file_object.close()\n\n #return the dict of results\n return results", "def ParsePkgInfoFile(filename, valid_keys=None, required_keys=None):\n with open(filename) as f:\n return ParsePkgInfo(f.read(), filename, valid_keys, required_keys)", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def read_enc_settings():\n print(\"Decrypting {}\".format(ENC_SETTINGS))\n try:\n output = subprocess.check_output(['gpg', '-d', ENC_SETTINGS])\n except subprocess.SubprocessError:\n print(\"Decryption failed, ignoring\")\n return\n config = ConfigParser()\n config.read_string(output.decode('utf8', errors='ignore'))\n return config", "def parse_dict(self, data):\n self.public_key = VerifyingKey(data['public_key'])\n self.signature = base58.b58decode(data['signature']) if data['signature'] else None", "def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")", "def decode(self, crypto):", "def extract_ca_private_key_bytes_from_pem(pem_content):\n found_marker = False\n for begin_marker in [constants.BEGIN_PRIVATE_KEY_MARKER,\n constants.BEGIN_RSA_PRIVATE_KEY_MARKER]:\n begin_search = pem_content.find(begin_marker)\n if begin_search >= 0:\n found_marker = True\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n found_marker = False\n for end_marker in [constants.END_PRIVATE_KEY_MARKER,\n constants.END_RSA_PRIVATE_KEY_MARKER]:\n end_search = pem_content.find(end_marker)\n if end_search >= 0:\n found_marker = True\n end_search += len(end_marker)\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n base64_key = base64.encode_as_text(pem_content[begin_search:end_search])\n return base64_key", "def from_buffer(data, encoding='pem'):\n return X509Csr.from_open_file(io.BytesIO(data), encoding)", "def verify(self):\n if not self.public_key:\n self.fetch_public_key()\n data = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}data\").text\n sig = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}sig\").text\n sig_contents = '.'.join([\n data,\n b64encode(b\"application/xml\").decode(\"ascii\"),\n b64encode(b\"base64url\").decode(\"ascii\"),\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n ])\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key))\n if not cipher.verify(sig_hash, urlsafe_b64decode(sig)):\n raise SignatureVerificationError(\"Signature cannot be verified using the given public key\")", "def test_ws_getItemInfosWithBinary(self):\n self.changeUser('pmCreator1')\n 
self.failUnless(len(self.portal.portal_catalog(portal_type='MeetingItemPma')) == 0)\n # prepare data for a default item\n req = self._prepareCreationData()\n # add one annex\n data = {'title': 'My annex 1', 'filename': 'smallTestFile.pdf', 'file': 'smallTestFile.pdf'}\n req._creationData._annexes = [self._prepareAnnexInfo(**data)]\n # create the item\n newItem, reponse = self._createItem(req)\n # get informations about the item, by default include_annex_binary is True\n resp = self._getItemInfos(newItem.UID(), showAnnexes=True, toBeDeserialized=False)\n # we have 1 annex\n self.assertEqual(len(resp._itemInfo[0]._annexes), 1)\n # the returned annex is the one created\n self.assertEqual(resp._itemInfo[0]._annexes[0]._title, 'My annex 1')\n # file content is preserved correctly\n annex_file = open(os.path.join(os.path.dirname(__file__), data.get('file')))\n self.assertEqual(resp._itemInfo[0]._annexes[0]._file, annex_file.read())\n # get informations about the item, set include_annex_binary to False\n resp = self._getItemInfos(newItem.UID(),\n showAnnexes=True,\n include_annex_binary=False,\n toBeDeserialized=False)\n # we have 1 annex\n self.assertEqual(len(resp._itemInfo[0]._annexes), 1)\n # the returned annex is the one created\n self.assertEqual(resp._itemInfo[0]._annexes[0]._title, 'My annex 1')\n # attribute _file of the annex should be empty\n self.assertFalse(resp._itemInfo[0]._annexes[0]._file)", "def cert_info(user, course):\r\n if not course.may_certify():\r\n return {}\r\n\r\n return _cert_info(user, course, certificate_status_for_student(user, course.id))", "def extract(self):\n\n self.decode(CMS_object.extract(self))\n return self.get_content()", "def _read_ent(ent_file):\r\n with open(ent_file, 'rb') as f:\r\n f.seek(352) # end of header\r\n\r\n note_hdr_length = 16\r\n\r\n allnote = []\r\n while True:\r\n note = {}\r\n note['type'], = unpack('<i', f.read(4))\r\n note['length'], = unpack('<i', f.read(4))\r\n note['prev_length'], = unpack('<i', f.read(4))\r\n note['unused'], = unpack('<i', f.read(4))\r\n if not note['type']:\r\n break\r\n s = f.read(note['length'] - note_hdr_length)\r\n s = s[:-2] # it ends with one empty byte\r\n s = s.decode('utf-8', errors='replace')\r\n s1 = s.replace('\\n', ' ')\r\n s1 = s1.replace('\\\\xd ', '')\r\n s1 = s1.replace('(.', '{')\r\n s1 = sub(r'\\(([A-Za-z0-9,\" ]*)\\)', r'[\\1]', s1)\r\n s1 = s1.replace(')', '}')\r\n # s1 = s1.replace('\",', '\" :')\r\n s1 = sub(r'(\\{[\\w\"]*),', r'\\1 :', s1)\r\n s1 = s1.replace('{\"', '\"')\r\n s1 = s1.replace('},', ',')\r\n s1 = s1.replace('}}', '}')\r\n s1 = sub(r'\\(([0-9 ,-\\.]*)\\}', r'[\\1]', s1)\r\n try:\r\n note['value'] = eval(s1)\r\n except:\r\n note['value'] = s\r\n allnote.append(note)\r\n return allnote", "def read_config(self, config):\n parser = SafeConfigParser()\n parser.read(config)\n\n cert = parser.get('https', 'cert')\n key = parser.get('https', 'key')\n\n return cert, key", "def parse_kiss(self):\n frame_len = len(self.frame)\n\n if frame_len < 16:\n self._logger.debug('Frame len(%s) < 16, Exiting.', frame_len)\n return\n\n for raw_slice in range(0, frame_len):\n\n # Is address field length correct?\n # Find the first ODD Byte followed by the next boundary:\n if (ord(self.frame[raw_slice]) & 0x01\n and ((raw_slice + 1) % 7) == 0):\n\n i = (raw_slice + 1) / 7\n\n # Less than 2 callsigns?\n if 1 < i < 11:\n # For frames <= 70 bytes\n if frame_len >= raw_slice + 2:\n if (ord(self.frame[raw_slice + 1]) & 0x03 == 0x03 and\n ord(self.frame[raw_slice + 2]) in\n [0xf0, 0xcf]):\n 
self._extract_kiss_text(raw_slice)\n self._extract_kiss_destination()\n self._extract_kiss_source()\n self._extract_kiss_path(i)", "def test_private_key_rsa(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_RSA)", "def _read_all(self):\n try:\n with open(self.keystore_file, \"r\") as filep:\n return json.load(filep)\n except (IOError, OSError, ValueError):\n return {}", "def test_signature_verification(self):\n curdir = os.path.dirname(os.path.abspath(__file__))\n keydir = os.path.join(curdir, \"data\", \"ima_keys\")\n\n lines = SIGNATURES.split('\\n')\n\n # empty keyring\n keyring = ima_file_signatures.ImaKeyring()\n self.assertTrue(ima.process_measurement_list(lines, ima_keyring=keyring) is None)\n\n # add key for 1st entry; 1st entry must be verifiable\n rsakeyfile = os.path.join(keydir, \"rsa2048pub.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(rsakeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:1], ima_keyring=keyring) is not None)\n self.assertTrue(ima.process_measurement_list(lines[1:2], ima_keyring=keyring) is None)\n\n # add key for 2nd entry; 1st & 2nd entries must be verifiable\n eckeyfile = os.path.join(keydir, \"secp256k1.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(eckeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:2], ima_keyring=keyring) is not None)", "def __verify_details(self):\n if self.major[0] not in self.data[self.root]:\n self.data[self.root][self.major[0]] = {}\n for key, value in self.template_data[self.root][self.major[0]].items():\n key, value = self.__verified_details_key_value(key, value)\n self.data[self.root][self.major[0]][key] = self.__verify_values(key, value, self.data[self.root][self.major[0]])", "def test__format_asn_dict(self, parser):\n for key, value in RPKI_Validator_Wrapper.get_validity_dict().items():\n d = {'asn': 'AS198051', 'prefix': '1.2.0.0/16', 'validity': key}\n assert parser._format_asn_dict(d) == [198051, '1.2.0.0/16', value]", "def test_parse_direct_response(self):\n response = \"\"\"\\\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<createCustomerProfileTransactionResponse xmlns=\"AnetApi/xml/v1/schema/AnetApiSchema.xsd\">\n <messages>\n <resultCode>Ok</resultCode>\n <message>\n 
<code>I00001</code>\n <text>Successful.</text>\n </message>\n </messages>\n <directResponse>*1*;*1*;*1*;*This transaction has been approved.*;*000000*;*Y*;*2000000001*;*INV000001*;*description of transaction*;*10.95*;*CC*;*auth_capture*;*custId123*;*John*;*Doe*;**;*123 Main St., foo*;*Bellevue*;*WA*;*98004*;*USA*;*000-000-0000*;**;*[email protected]*;*John*;*Doe*;**;*123 Main St.*;*Bellevue*;*WA*;*98004*;*USA*;*1.00*;*0.00*;*2.00*;*FALSE*;*PONUM000001*;*D18EB6B211FE0BBF556B271FDA6F92EE*;*M*;*buaaahahah , *;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;**;*wallers,*</directResponse>\n</createCustomerProfileTransactionResponse>\"\"\"\n resp = x.to_dict(response, responses.cim_map, delimiter=u\";\", encapsulator=u\"*\")\n assert resp.direct_response.code == u\"1\"\n assert resp.direct_response.address == u\"123 Main St., foo\"\n assert resp.direct_response.holder_verification == u\"buaaahahah , \"", "def read_from(cls, s, taproot: bool = False):\n first = s.read(1)\n origin = None\n if first == b\"[\":\n prefix, char = read_until(s, b\"]\")\n if char != b\"]\":\n raise ArgumentError(\"Invalid key - missing ]\")\n origin = KeyOrigin.from_string(prefix.decode())\n else:\n s.seek(-1, 1)\n k, char = read_until(s, b\",)/\")\n der = b\"\"\n # there is a following derivation\n if char == b\"/\":\n der, char = read_until(s, b\"<{,)\")\n # legacy branches: {a,b,c...}\n if char == b\"{\":\n der += b\"{\"\n branch, char = read_until(s, b\"}\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing }\")\n der += branch + b\"}\"\n rest, char = read_until(s, b\",)\")\n der += rest\n # multipart descriptor: <a;b;c;...>\n elif char == b\"<\":\n der += b\"<\"\n branch, char = read_until(s, b\">\")\n if char is None:\n raise ArgumentError(\"Failed reading the key, missing >\")\n der += branch + b\">\"\n rest, char = read_until(s, b\",)\")\n der += rest\n if char is not None:\n s.seek(-1, 1)\n # parse key\n k, xonly_repr = cls.parse_key(k, taproot)\n # parse derivation\n allow_hardened = isinstance(k, bip32.HDKey) and isinstance(k.key, ec.PrivateKey)\n derivation = AllowedDerivation.from_string(\n der.decode(), allow_hardened=allow_hardened\n )\n return cls(k, origin, derivation, taproot, xonly_repr)", "def extract_aes_key(self) -> bytes:\r\n log(\"extract_aes_key start\")\r\n try:\r\n key_base64_raw: bytes = self.file_lines[0]\r\n except IndexError:\r\n # shouldn't be reachable due to test for emptiness prior in code, keep around anyway.\r\n log(\"extract_aes_key fail 1\")\r\n raise DecryptionKeyInvalidError(\"There was no decryption key.\")\r\n \r\n # Test that every byte in the byte-string of the raw key is a valid url-safe base64\r\n # character this also cuts down some junk files.\r\n for c in key_base64_raw:\r\n if c not in URLSAFE_BASE64_CHARACTERS:\r\n log(f\"extract_aes_key fail 2: '{key_base64_raw.decode()}' character: '{chr(c)}'\")\r\n raise DecryptionKeyInvalidError(f\"Key not base64 encoded: {str(key_base64_raw)}\")\r\n \r\n # handle the various cases that can occur when extracting from base64.\r\n try:\r\n decoded_key: bytes = decode_base64(key_base64_raw)\r\n except (TypeError, PaddingException, Base64LengthException) as decode_error:\r\n log(\"extract_aes_key fail 3\")\r\n raise DecryptionKeyInvalidError(f\"Invalid decryption key: {decode_error}\")\r\n \r\n base64_key = self.rsa_decrypt(decoded_key)\r\n \r\n try:\r\n decrypted_key: bytes = decode_base64(base64_key)\r\n if not decrypted_key:\r\n log(\"extract_aes_key fail 4\")\r\n raise 
TypeError(f\"decoded key was '{decrypted_key}'\")\r\n except (TypeError, IndexError, PaddingException, Base64LengthException) as decr_error:\r\n log(\"extract_aes_key fail 5\")\r\n raise DecryptionKeyInvalidError(f\"Invalid decryption key: {decr_error}\")\r\n \r\n # If the decoded bits of the key is not exactly 128 bits (16 bytes) that probably means that\r\n # the RSA encryption failed - this occurs when the first byte of the encrypted blob is all\r\n # zeros. Apps require an update to solve this (in a future rewrite we should use a correct\r\n # padding algorithm).\r\n if len(decrypted_key) != 16:\r\n log(\"extract_aes_key 6\")\r\n raise DecryptionKeyInvalidError(f\"Decryption key not 128 bits: {decrypted_key}\")\r\n \r\n if self.participant.os_type == IOS_API:\r\n self.populate_ios_decryption_key(base64_key)\r\n \r\n log(\"extract_aes_key success\")\r\n return decrypted_key", "def _sloppy_parse_user_and_api_data (self, key, contents):\n key_start = contents.find(key + '\"')\n if int(key_start) == -1:\n return None\n sub_contents = contents[int(key_start):]\n l = sub_contents.find('\",')\n return contents[(int(key_start)+len(key)+3):int(key_start)+l].decode('string_escape')", "def _get_key_pair_from_sk(sk: ecdsa.SigningKey) -> typing.Tuple[bytes, bytes]:\n return sk.to_string(), \\\n sk.verifying_key.to_string(\"compressed\")", "def verify(content):\n\n # see if the content is a list and then extract the first data\n if isinstance(content, list):\n if len(content) > 0:\n content = content[0]\n # convert to unicode to str\n x = content.encode('ascii', 'ignore').strip()\n x = re.sub('\\n','', x)\n x = re.sub('\\t', '', x)\n return x\n else:\n return ''\n else:\n # convert unicode to str\n return content.encode('ascii', 'ignore').strip()", "def fetch_x509_bundles(self) -> X509BundleSet:", "def check_dnssec(text):\n\n try:\n from dns.exception import DNSException\n import dns.dnssec\n import dns.rrset\n import Crypto.PublicKey.RSA\n #import ecdsa.ecdsa\n except ImportError:\n sys.exit(\"Problem importing DNSPython or supporting crypto packages, are they installed?\")\n\n wired_ttl = \"3600\"\n wired_rdclass = \"IN\"\n\n rrs = {}\n\n for line in text.splitlines():\n\n try:\n name, ttl, rdclass, rdtype, rdata = line.split(None, 4)\n except ValueError:\n continue\n\n if ttl != wired_ttl or rdclass != wired_rdclass:\n continue\n\n try:\n rrs[name, rdtype].append(rdata)\n except KeyError:\n rrs[name, rdtype] = [rdata]\n\n # Done parsing. 
We expect to have seen an A RRset, an RRSIG of that\n # A RRset, and the DNSKEY that we'll need to verify the RRSIG.\n\n if len(rrs) != 3:\n sys.exit(\"Expected two RRsets and an RRSIG, got %r\" % rrs)\n\n rrs = dict((rdtype, dns.rrset.from_text_list(name, int(wired_ttl), wired_rdclass, rdtype, rrs[name, rdtype]))\n for name, rdtype in rrs)\n\n try:\n dns.dnssec.validate(rrs[\"A\"], rrs[\"RRSIG\"], { rrs[\"DNSKEY\"].name : rrs[\"DNSKEY\"] })\n except DNSException, e:\n sys.exit(\"DNSSEC verification failed: %s\" % e)\n\n sys.stdout.write(\"\\nDNSSEC verification successful!\\n\\n\")", "def _extract_certificate_chain(connection):\n logger = getLogger(__name__)\n cert_data = {}\n logger.debug(\n \"# of certificates: %s\",\n len(connection.get_peer_cert_chain()))\n\n for cert in connection.get_peer_cert_chain():\n logger.debug(\n u'subject: %s, issuer: %s', cert.get_subject(),\n cert.get_issuer())\n data = _extract_values_from_certificate(cert)\n logger.debug('is_root_ca: %s', data[u'is_root_ca'])\n cert_data[cert.get_subject().der()] = data\n return _create_pair_issuer_subject(cert_data)", "def test_valid_xban():\n\n # first is a valid dict but the second one is not\n stream = [\n {\"xban_config\": {\"title\": \"testfile\", \"description\": \"\", \"board_color\": [],}},\n {},\n ]\n assert xban_content(\"test/testfile.yaml\", stream) == stream", "def test_private_key_ec(self):\n priv = \"\"\"-----BEGIN EC PARAMETERS-----\nBggqhkjOPQMBBw==\n-----END EC PARAMETERS-----\n-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIJZ57L6f6ywtZa7VhsvthAShxjdrL9EIrVwVgxnmD5b3oAoGCCqGSM49\nAwEHoUQDQgAEIg6eBOPv5M2z4ANtsJukbimKWX04lanEdALsbu2xNCDBXJ0IJ4Sd\n3u4G1qvrKX0mBHd7yUPGui+7bvp084mNag==\n-----END EC PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBiTCCAS+gAwIBAgIJAINtiwRC4eBJMAoGCCqGSM49BAMCMCExDzANBgNVBAMM\nBkVDIDI1NjEOMAwGA1UECgwFV2ViQ0EwHhcNMTgwNTI3MTAyNTIyWhcNMTgwNjI2\nMTAyNTIyWjAhMQ8wDQYDVQQDDAZFQyAyNTYxDjAMBgNVBAoMBVdlYkNBMFkwEwYH\nKoZIzj0CAQYIKoZIzj0DAQcDQgAEIg6eBOPv5M2z4ANtsJukbimKWX04lanEdALs\nbu2xNCDBXJ0IJ4Sd3u4G1qvrKX0mBHd7yUPGui+7bvp084mNaqNQME4wHQYDVR0O\nBBYEFEmE51rEUz4TuD8oEAw2lvMfvi6LMB8GA1UdIwQYMBaAFEmE51rEUz4TuD8o\nEAw2lvMfvi6LMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgfiKDoHB3\nWzRO1juSMyVBuBw2p1o0ab+3fBNDvff8PXcCIQCUKIyzTnM7Wz6TkABfqOcmx7n4\nsbRvdOg3CepLGW3Ytw==\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_EC)", "def get_cert_file(self, bypass_time_validity_check=False):\n file_contents = (\n \"{} {} {}\"\n ).format(self.cert_key_type,\n str(base64.b64encode(self._sign_cert(bypass_time_validity_check)), encoding='ascii'),\n self.public_key_comment)\n return file_contents", "def get_contents(self, item):\n return self.decrypt(item.fullpath)", "def _extract_metadata(self, header, cleaner):\n metadata = []\n for k, v in header.items():\n key = str(cleaner(k)) # clean key and ensure it is a string\n val = str(cleaner(v)) # clean value and ensure it is a string\n if (key and val):\n metadata.append(Metadatum(key, val))\n return metadata", "def get_data(prog, domain, tlsa):\n # first, let's see if 'raw' exists as a file\n name = try_as_file(domain)\n\n # if 'None', then try as a domain to return a file\n if not name:\n name = try_as_domain(prog, domain)\n\n # files is now a file or a raw list (or else an exception was raised)\n if type(name) is list:\n grps = archive_groups(name)\n if grps:\n cert = [ certop.get_xive(tlsa.usage, g) for g in grps ]\n else:\n cert = [ certop.get_xive(tlsa.usage, 
name) ]\n if not cert:\n raise Except.FunctionError(\n \"no recognized files in directory '{}'\".format(domain))\n else:\n cert = [ name ]\n\n return [ [ c, certop.get_hash(tlsa.selector, tlsa.matching,\n certop.read_cert(c, tlsa.usage)) ]\n for c in cert ]", "def _parse(file_contents):\n\n if file_contents is None or file_contents == '':\n return {}\n\n result = {}\n\n for line in file_contents.splitlines():\n # Full line comment\n if line[:1] == '#':\n continue\n\n parts = line.split('=', 1)\n\n # Not a full key-value pair.\n if len(parts) < 2:\n continue\n\n result[parts[0].strip()] = parts[1].strip()\n\n return result", "def parse(self, lines):\n # convert file to string deleting end of line charcters\n citations_string = self.prepare_file(lines)\n # extract the entries from the string\n entries = list(self.find_entries(citations_string))\n entries.append(len(citations_string))\n # parse each entry to generate a citation\n for idx, jdx in zip(entries[:-1], entries[1:]):\n self.parse_entry(citations_string[idx:jdx])\n return self.force_field.citations", "def extract_subjects(subject_info_xml, primary_str):\n subject_info_pyxb = deserialize_subject_info(subject_info_xml)\n subject_info_tree = d1_common.cert.subject_info.gen_subject_info_tree(\n subject_info_pyxb, primary_str\n )\n return subject_info_tree.get_subject_set()", "def read_des_kvno(self, keytab, cell, realm):\n self._log(\"Read KVNO: keytab='%s', cell='%s', realm='%s'\" %\n (keytab, cell, realm), 'INFO')\n kvnos = [None]\n re_princ = re.compile(\"afs(/%s)?@%s\" % (cell, realm))\n re_des = re.compile(\"(des-)|(DES )\") # single des\n re_line = re.compile(\"\\s*(\\d+)\\s+(\\S+)\\s+\\((\\S+)\\)\")\n command = \"sudo klist -k -e %s\" % (keytab)\n self._log(\"Running: %s \" % (command), 'INFO')\n klist = os.popen(command)\n for line in klist.readlines():\n self._log(line.rstrip(), 'INFO')\n match = re_line.match(line)\n if match:\n kvno = int(match.group(1))\n princ = match.group(2)\n enctype = match.group(3)\n self._log(\"kvno=%d, princ='%s', enctype='%s'\" % (kvno, princ, enctype), 'INFO')\n if re_princ.match(princ) and re_des.match(enctype):\n kvnos.append(kvno)\n rc = klist.close()\n if not rc is None:\n raise AssertionError(\"klist failed: exit code=%d\" % (rc))\n kvno = sorted(kvnos, reverse=True)[0]\n if kvno is None:\n raise AssertionError(\"Failed to find a kvno of afs key in file '%s'.\" % (keytab))\n self._log(\"kvno: %d\" % (kvno), 'INFO')\n return kvno", "def test_to_and_from_json_signed_order(\n pydex_client\n):\n expected_order_json = {\n 'makerAddress': '0x5409ed021d9299bf6814279a6a1411a7e866a631',\n 'takerAddress': '0x0000000000000000000000000000000000000000',\n 'makerFee': '0',\n 'takerFee': '0',\n 'senderAddress': '0x0000000000000000000000000000000000000000',\n 'makerAssetAmount': '50000000000000',\n 'takerAssetAmount': '100000000000000',\n 'makerAssetData': '0xf47261b0000000000000000000000000c4abc01578139e2105d9c9eba0b0aa6f6a60d082',\n 'takerAssetData': '0xf47261b0000000000000000000000000358b48569a4a4ef6310c1f1d8e50be9d068a50c6',\n 'exchangeAddress': '0xbce0b5f6eb618c565c3e5f5cd69652bbc279f44e',\n 'salt': '314725192512133120',\n 'feeRecipientAddress': '0x0000000000000000000000000000000000000000',\n 'expirationTimeSeconds': 1550439701,\n 'hash': '0x3fbc553ade14a36ba6e5055817d44170797c560e62d0db5c79aa0739732c8199',\n 'signature': (\n '0x1c4f65f2bfbf384e783cbb62ef27e7091fa02d9fa9ad1299670f05582325e027b'\n '9261afef5bceed9e0c84a92aeeead35df85b1cf174a02f0f3d2935c73552ac28803'),\n }\n # make a new SignedOrder object 
without the signature of hash\n order = SignedOrder.from_json(\n {k: v for k, v in expected_order_json.items()\n if k not in [\"signature\", \"hash\"]},\n include_signature=False,\n )\n print(pydex_client.private_key)\n print(order.update().hash)\n # sign the order\n order.signature = pydex_client.sign_hash_zx_compat(order.update().hash)\n assert order.to_json(include_hash=True) == expected_order_json", "def opensslCmsDataCreate( conveyedInfoFile ):\n opensslCmdArgs = [ \"openssl\", \"cms\", \"-data_create\", \"-in\", conveyedInfoFile,\n \"-outform\", \"der\" ]\n conveyedInfoCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return conveyedInfoCmsDerBase64", "def pfx2pem_memmory(input_file):\n pfx = open(input_file, 'rb').read()\n p12 = crypto.load_pkcs12(pfx)\n pem = crypto.dump_certificate(crypto.FILETYPE_PEM, p12.get_certificate())\n pem += crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())\n return pem", "def testExtractionOfKeySignatureAttributes(self):\n num_to_major_key = {0: 'C',\n 1: 'G',\n 2: 'D',\n 3: 'A',\n 4: 'E',\n 5: 'B',\n 6: 'F#',\n 7: 'C#',\n 8: 'G#',\n 9: 'D#',\n 10: 'A#',\n 11: 'E#',\n 12: 'B#',\n -2: 'Bb',\n -12: 'Dbb',\n -11: 'Abb',\n -10: 'Ebb',\n -9: 'Bbb',\n -8: 'Fb',\n -7: 'Cb',\n -6: 'Gb',\n -5: 'Db',\n -4: 'Ab',\n -3: 'Eb',\n -1: 'F'}\n num_to_minor_key = {0: 'a',\n 1: 'e',\n 2: 'b',\n 3: 'f#',\n 4: 'c#',\n 5: 'g#',\n 6: 'd#',\n 7: 'a#',\n 8: 'e#',\n 9: 'b#',\n 10: 'f##',\n 11: 'c##',\n 12: 'g##',\n -2: 'g',\n -12: 'bbb',\n -11: 'fb',\n -10: 'cb',\n -9: 'gb',\n -8: 'db',\n -7: 'ab',\n -6: 'eb',\n -5: 'bb',\n -4: 'f',\n -3: 'c',\n -1: 'd'}\n\n for test_mode in ['major', 'minor']:\n for i in range(-12, 13):\n ks = key.KeySignature(i)\n ks.mode = test_mode\n if test_mode == 'major':\n key_map = num_to_major_key\n else:\n key_map = num_to_minor_key\n try:\n key_name, num_sharps, mode, tonic_pitchclass = (\n pretty_music21._extract_key_signature_attributes(ks))\n except pretty_music21.PrettyMusic21Error:\n self.assertTrue(i < 7 or i > 7)\n continue\n self.assertEqual(key_name, key_map[i])\n if mode == 'minor':\n self.assertEqual(\n key.sharpsToPitch(num_sharps + 3).name,\n key.convertKeyStringToMusic21KeyString(key_name).upper())\n else:\n self.assertEqual(\n key.sharpsToPitch(num_sharps).name,\n key.convertKeyStringToMusic21KeyString(key_name).upper())\n\n self.assertEqual(mode, ks.mode)\n check_pitch = pitch.Pitch(\n key.convertKeyStringToMusic21KeyString(key_map[i]))\n check_pitchclass = check_pitch.pitchClass\n self.assertEqual(tonic_pitchclass, check_pitchclass)", "def _verify_cas2(ticket, service):\n try:\n from xml.etree import ElementTree\n except ImportError:\n from elementtree import ElementTree\n\n params = {'ticket': ticket, 'service': service}\n url = (urljoin(settings.CAS_SERVER_URL, 'serviceValidate') + '?' 
+\n urlencode(params))\n page = urlopen(url)\n try:\n response = page.read()\n '''Remove \\n\\t character from response xml'''\n response = re.sub(r'(?m)[\\t\\n]+', \"\", response)\n tree = ElementTree.fromstring(response)\n if tree[0].tag.endswith('authenticationSuccess'):\n member_of = []\n access_token = None\n user_name = None\n first_name = None\n last_name = None\n department = None\n for xmlTag in tree[0]:\n if xmlTag.tag.endswith('user'):\n user_name = xmlTag.text\n elif xmlTag.tag.endswith('firstName'):\n first_name = xmlTag.text\n elif xmlTag.tag.endswith('lastName'):\n last_name = xmlTag.text\n\n user_args = {\n \"user_name\":user_name,\n \"first_name\": first_name,\n \"last_name\": last_name\n }\n \n return user_args\n else:\n return None\n except Exception, e:\n logger.error(e)\n finally:\n page.close()", "def parse_contents(self):\n self.parsed_contents = tokenize(self.contents)[0]", "def validate_schema(self):\n for _, certificate in self.certificates_to_issue.items():\n with open(certificate.signed_cert_file_name) as cert:\n cert_json = json.load(cert)\n validate_unsigned_v1_2(cert_json)", "def _map_cert_tls_container(cert):\n certificate = cert.get_certificate()\n pkey = cert_parser.dump_private_key(cert.get_private_key(),\n cert.get_private_key_passphrase())\n return data_models.TLSContainer(\n primary_cn=_get_primary_cn(certificate),\n private_key=pkey,\n certificate=certificate,\n intermediates=cert.get_intermediates())", "def _unarmor(pem_bytes):\n\n if not isinstance(pem_bytes, byte_cls):\n raise TypeError(unwrap(\n '''\n pem_bytes must be a byte string, not %s\n ''',\n _type_name(pem_bytes)\n ))\n\n # Valid states include: \"trash\", \"headers\", \"body\"\n state = 'trash'\n headers = {}\n base64_data = b''\n object_type = None\n\n found_start = False\n found_end = False\n\n for line in pem_bytes.splitlines(False):\n if line == b'':\n continue\n\n if state == \"trash\":\n # Look for a starting line since some CA cert bundle show the cert\n # into in a parsed format above each PEM block\n type_name_match = re.match(b'^(?:---- |-----)BEGIN ([A-Z0-9 ]+)(?: ----|-----)', line)\n if not type_name_match:\n continue\n object_type = type_name_match.group(1).decode('ascii')\n\n found_start = True\n state = 'headers'\n continue\n\n if state == 'headers':\n if line.find(b':') == -1:\n state = 'body'\n else:\n decoded_line = line.decode('ascii')\n name, value = decoded_line.split(':', 1)\n headers[name] = value.strip()\n continue\n\n if state == 'body':\n if line[0:5] in (b'-----', b'---- '):\n der_bytes = base64.b64decode(base64_data)\n\n yield (object_type, headers, der_bytes)\n\n state = 'trash'\n headers = {}\n base64_data = b''\n object_type = None\n found_end = True\n continue\n\n base64_data += line\n\n if not found_start or not found_end:\n raise ValueError(unwrap(\n '''\n pem_bytes does not appear to contain PEM-encoded data - no\n BEGIN/END combination found\n '''\n ))", "def read_in_xforce_keys(file):\n key = file.readline().strip()\n password = file.readline().strip()\n if validate_api_creds(key) and validate_api_creds(password):\n return key, password\n else:\n print(\"API credentials invalid. Please check your key and password. 
Exiting...\")\n sys.exit(1)", "def plist_from_pkcs7(pkcs7):\n # DEP request\n\n # base64 encode the DER data, and wrap in a PEM-ish format for SMIME.load_pkcs7_bio()\n req_data = base64_to_pem('PKCS7', pkcs7)\n\n p7_bio = BIO.MemoryBuffer(str(req_data))\n pkcs7 = SMIME.load_pkcs7_bio(p7_bio)\n\n p7_signers = pkcs7.get0_signers(X509.X509_Stack())\n\n signer = SMIME.SMIME()\n signer.set_x509_store(X509.X509_Store())\n signer.set_x509_stack(p7_signers)\n\n # TODO/XXX: not verifying ANY certificates!\n #\n # spec says we should verify against the \"Apple Root CA\" and that this\n # CMS message contains all intermediates to do that verification.\n # M2Crypto has no way to get at all the intermediate certificates to\n # do this manually we'd need to extract all of the certificates and\n # verify the chain aginst it. Note as of 2016-03-14 on a brand new\n # iPad Apple was including an expired certificate in this chain. Note\n # also that at least one of the intermediate certificates had a\n # certificate purpose apparently not appropraite for CMS/SMIME\n # verification. For now just verify with no CA and skip any\n # verification.\n plist_text = signer.verify(pkcs7, None, flags=SMIME.PKCS7_NOVERIFY)\n\n plist = plistlib.readPlistFromString(plist_text)\n\n return plist", "def verify_kpoints_content(kpoints):\n assert kpoints['mode'] == 'automatic'\n assert kpoints['comment'] == 'Example file'\n assert kpoints['divisions'] == [4, 4, 4]\n assert kpoints['shifts'] == [0.0, 0.0, 0.0]\n assert kpoints['points'] == None\n assert kpoints['centering'] == 'Gamma'\n assert kpoints['tetra'] == None\n assert kpoints['tetra_volume'] == None\n assert kpoints['num_kpoints'] == 0", "def get_asymm_keys(parameters):\n\tprivate_key = parameters.generate_private_key()\n\treturn private_key,private_key.public_key()", "def ft_seal_and_unseal():\n print \"generating key pair\"\n pubkey_pem, privkey_pem = api.generate_key_pair( 4096 )\n \n sealed_buf = create_sealed_and_signed_blob( privkey_pem, \"foo\", \"hello world\")\n print \"sealed data is:\\n\\n%s\\n\\n\" % sealed_buf\n\n buf = verify_and_unseal_blob( pubkey_pem, \"foo\", sealed_buf )\n print \"unsealed data is: \\n\\n%s\\n\\n\" % buf", "def test_create_and_import_encrypted_rsa(self):\n name = \"key_encrypted\"\n password = \"123456\"\n bits= 3072\n generate_and_write_rsa_keypair(name, bits, password)\n private_key = import_rsa_key_from_file(name, password)\n public_key = import_rsa_key_from_file(name + \".pub\")\n\n securesystemslib.formats.KEY_SCHEMA.check_match(private_key)\n self.assertTrue(private_key[\"keyval\"].get(\"private\"))\n self.assertTrue(\n securesystemslib.formats.PUBLIC_KEY_SCHEMA.matches(public_key))", "def read_meta(self):\n meta = cPickle.load(open('../sugar_analysis_data/META-CABALLO2.pkl'))\n self.meta_sn_name_list = []\n self.meta_zcmb = []\n self.meta_x0 =[]\n self.meta_x0_err = []\n self.meta_x1 =[]\n self.meta_x1_err = []\n self.meta_c = []\n self.meta_c_err = []\n self.meta_mb = []\n self.meta_mb_err = []\n self.meta_cov_x0_x1 = [] \n self.meta_cov_x0_c = []\n self.meta_cov_x1_c = []\n self.meta_cov_mb_x1 = []\n self.meta_cov_mb_c = [] \n self.meta_zhl = []\n self.meta_zhl_err = []\n self.meta_idr = []\n for meta_sn_name in meta.keys(): \n \n if meta[meta_sn_name]['idr.subset'] != 'bad' and meta[meta_sn_name]['idr.subset'] != 'auxiliary':\n \n self.meta_sn_name_list.append(meta_sn_name)\n self.meta_zhl_err.append(meta[meta_sn_name]['host.zhelio.err'])\n self.meta_zhl.append(meta[meta_sn_name]['host.zhelio'])\n 
self.meta_zcmb.append(meta[meta_sn_name]['host.zcmb'])\n self.meta_x0.append(meta[meta_sn_name]['salt2.X0'])\n self.meta_x0_err.append(meta[meta_sn_name]['salt2.X0.err'])\n self.meta_x1.append(meta[meta_sn_name]['salt2.X1'])\n self.meta_x1_err.append(meta[meta_sn_name]['salt2.X1.err'])\n self.meta_c.append(meta[meta_sn_name]['salt2.Color'])\n self.meta_c_err.append(meta[meta_sn_name]['salt2.Color.err'])\n self.meta_mb.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B'])\n self.meta_mb_err.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B.err'])\n self.meta_cov_x0_x1.append(meta[meta_sn_name]['salt2.CovX0X1'])\n self.meta_cov_x0_c.append(meta[meta_sn_name]['salt2.CovColorX0'])\n self.meta_cov_x1_c.append(meta[meta_sn_name]['salt2.CovColorX1'])\n self.meta_cov_mb_x1.append(meta[meta_sn_name]['salt2.CovRestFrameMag_0_BX1'])\n self.meta_cov_mb_c.append(meta[meta_sn_name]['salt2.CovColorRestFrameMag_0_B'])\n self.meta_idr.append(meta[meta_sn_name]['idr.subset'])\n \n self.meta_idr = np.array(self.meta_idr)\n self.meta_zcmb = np.array(self.meta_zcmb)\n self.meta_zhl = np.array(self.meta_zhl)\n self.meta_zhl_err = np.array(self.meta_zhl_err)\n self.meta_x0 = np.array(self.meta_x0)\n self.meta_x0_err = np.array(self.meta_x0_err)\n self.meta_x1 = np.array(self.meta_x1)\n self.meta_x1_err = np.array(self.meta_x1_err) \n self.meta_c = np.array(self.meta_c)\n self.meta_c_err = np.array(self.meta_c_err)\n self.meta_mb = np.array(self.meta_mb)\n self.meta_mb_err = np.array(self.meta_mb_err)\n self.meta_cov_x0_x1 = np.array(self.meta_cov_x0_x1)\n self.meta_cov_x0_c = np.array(self.meta_cov_x0_c)\n self.meta_cov_x1_c = np.array(self.meta_cov_x1_c)\n self.meta_cov_mb_x1 = np.array(self.meta_cov_mb_x1)\n self.meta_cov_mb_c = np.array(self.meta_cov_mb_c)", "def recheck_conts_package(conts_dict, si_result):\n print('######## BEGIN CHECKING PACKAGES ? ########')\n packages = si_result.get('packages', True)\n description = si_result.get('description_of_goods', False)\n pUnit = None\n print(packages == description)\n\n if packages == description:\n conts_package = []\n for unit in current_app.config['CONTAINER_PACKAGE_UNITS']:\n re_unit = re.compile(\n r'(\\d+\\W*(' +\n unit + r'(s|S)?\\s?(\\Ws\\W|\\WS\\W)?' +\n r')\\W*)|(\\W*(' +\n unit + r'(s|S)?\\s?(\\Ws\\W|\\WS\\W)?' +\n r')\\W*\\d+)')\n units_result = re.findall(re_unit, packages)\n\n if (\n len(units_result) == len(conts_dict)\n ) or (\n len(units_result) == len(conts_dict) + 1\n ):\n for result in units_result:\n print(result)\n if len(result[0]) > len(unit):\n print('result[0]', result[0])\n conts_package.append(\n re.sub(r'[^\\d]+', '', result[0]))\n elif len(result[4]) > len(unit):\n print('result[4]', result[4])\n conts_package.append(\n re.sub(r'[^\\d]+', '', result[4]))\n\n if unit.isupper():\n pUnit = unit + 'S'\n else:\n pUnit = unit + 's'\n\n break\n\n if len(conts_package) >= len(conts_dict):\n for i, cont in enumerate(conts_dict):\n cont['packages'] = str(conts_package[i]) + ' ' + pUnit\n\n return conts_dict" ]
[ "0.6905935", "0.519228", "0.5144184", "0.5090635", "0.4969031", "0.49095032", "0.48951414", "0.48807597", "0.4877804", "0.48451734", "0.4826676", "0.48249158", "0.48138803", "0.4775471", "0.47672594", "0.4758738", "0.47264582", "0.47241712", "0.46494445", "0.4630516", "0.46156985", "0.45940387", "0.4527909", "0.44569722", "0.44397488", "0.44365314", "0.44193998", "0.43797928", "0.4379079", "0.43483678", "0.43179575", "0.43128857", "0.4307328", "0.4287031", "0.4261894", "0.42452788", "0.42382973", "0.42382973", "0.4236558", "0.42353293", "0.42287666", "0.42106286", "0.42082044", "0.42079458", "0.4207256", "0.419797", "0.41977954", "0.41951755", "0.41936484", "0.41903394", "0.41861397", "0.41829437", "0.41691336", "0.41599068", "0.41565517", "0.41545564", "0.41470012", "0.41384822", "0.41373014", "0.41372928", "0.4135281", "0.41333413", "0.41238394", "0.41217786", "0.4109281", "0.41003856", "0.40994346", "0.4087244", "0.4083062", "0.4082089", "0.40722135", "0.40715888", "0.4069594", "0.40658382", "0.40646523", "0.40636414", "0.40618202", "0.4057143", "0.40437794", "0.4040022", "0.40300614", "0.4027009", "0.40246534", "0.40177453", "0.40155634", "0.40130794", "0.40125257", "0.4003664", "0.40009412", "0.39960966", "0.3990443", "0.3989237", "0.3984412", "0.39835197", "0.39830923", "0.39722922", "0.39716214", "0.3967184", "0.39671534", "0.3964747" ]
0.6486718
1
Decrypts encrypted ASN.1 data
def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password): decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher] # Modern, PKCS#5 PBES2-based encryption if encryption_algorithm_info.kdf == 'pbkdf2': if encryption_algorithm_info.encryption_cipher == 'rc5': raise ValueError(pretty_message( ''' PBES2 encryption scheme utilizing RC5 encryption is not supported ''' )) enc_key = pbkdf2( encryption_algorithm_info.kdf_hmac, password, encryption_algorithm_info.kdf_salt, encryption_algorithm_info.kdf_iterations, encryption_algorithm_info.key_length ) enc_iv = encryption_algorithm_info.encryption_iv plaintext = decrypt_func(enc_key, encrypted_content, enc_iv) elif encryption_algorithm_info.kdf == 'pbkdf1': derived_output = pbkdf1( encryption_algorithm_info.kdf_hmac, password, encryption_algorithm_info.kdf_salt, encryption_algorithm_info.kdf_iterations, encryption_algorithm_info.key_length + 8 ) enc_key = derived_output[0:8] enc_iv = derived_output[8:16] plaintext = decrypt_func(enc_key, encrypted_content, enc_iv) elif encryption_algorithm_info.kdf == 'pkcs12_kdf': enc_key = pkcs12_kdf( encryption_algorithm_info.kdf_hmac, password, encryption_algorithm_info.kdf_salt, encryption_algorithm_info.kdf_iterations, encryption_algorithm_info.key_length, 1 # ID 1 is for generating a key ) # Since RC4 is a stream cipher, we don't use an IV if encryption_algorithm_info.encryption_cipher == 'rc4': plaintext = decrypt_func(enc_key, encrypted_content) else: enc_iv = pkcs12_kdf( encryption_algorithm_info.kdf_hmac, password, encryption_algorithm_info.kdf_salt, encryption_algorithm_info.kdf_iterations, encryption_algorithm_info.encryption_block_size, 2 # ID 2 is for generating an IV ) plaintext = decrypt_func(enc_key, encrypted_content, enc_iv) return plaintext
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt(self, data):", "def rsa_pkcs1v15_decrypt(self, data):\n pass", "def decode(self, crypto):", "def decrypt(self, data):\n if not data:\n return ''\n data = self._crypt(data, self.DECRYPT)\n return self._unpad_data(data)", "def decrypted(data: str) -> str:\n\n return b64decode(data.encode('ascii')).decode('ascii')", "def decode(self, data):\n return self.__cipher.decrypt(data)", "def decrypt_message(self, env_key, data):\n\n if not env_key or not data:\n raise Exception('Arguments missing.')\n\n key = RSA.importKey(self.private_key)\n try:\n env_key = unquote(env_key).decode('utf8')\n data = unquote(data).decode('utf8')\n except AttributeError:\n # Python 3 compatible\n env_key = unquote(env_key)\n data = unquote(data)\n\n try:\n env_key = base64.b64decode(env_key)\n data = base64.b64decode(data)\n \n cipher = PKCS1_v1_5.new(key)\n\n sentinel = []\n session_key = cipher.decrypt(env_key, sentinel)\n\n rc4_cipher = ARC4.new(session_key)\n\n xml_data = rc4_cipher.decrypt(data)\n\n # TODO: add xml validation\n # schema_root = etree.XML(xml_data)\n # schema = etree.XMLSchema(schema_root)\n # parser = etree.XMLParser(schema=schema)\n\n return xml_data\n except Exception as e:\n if self.developement:\n exception(e)\n\n raise Exception('Could not decrypt message.')", "def decrypt_data(self, encrypted_data):\n raise NotImplementedError", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_data(data, encryption_key):\n assert isinstance(data, str)\n obj = AES.new(encryption_key, AES.MODE_CBC, 'This is an IV456')\n bytes_data = bytes.fromhex(data)\n return Pad.unpad(obj.decrypt(bytes_data)).decode()", "def decrypt(self, encryptedserial):\n # Obtain data and metadata, but return only data\n data, _ = self.decrypt_with_metadata(encryptedserial)\n return data", "def decrypt_message(encrypted_message):", "def decrypt(data, key):\n data = six.ensure_binary(data)\n try:\n data = privy.peek(hidden=data, password=key)\n except ValueError:\n error = \"Unable to decrypt {cnt} bytes of data using key {k}, invalid key!\"\n error = error.format(cnt=len(data), k=key)\n raise exceptions.ModuleError(error)\n return six.ensure_text(data)", "def _decrypt(self, data, key):\n seed1 = key\n seed2 = 0xEEEEEEEE\n result = BytesIO()\n\n for i in range(len(data) // 4):\n seed2 += self.encryption_table[0x400 + (seed1 & 0xFF)]\n seed2 &= 0xFFFFFFFF\n value = struct.unpack(\"<I\", data[i*4:i*4+4])[0]\n value = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n\n seed1 = ((~seed1 << 0x15) + 0x11111111) | (seed1 >> 0x0B)\n seed1 &= 0xFFFFFFFF\n seed2 = value + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n result.write(struct.pack(\"<I\", value))\n\n return result.getvalue()", "def decrypt(s):\n if s is None:\n return None\n else:\n # try:\n enc_value = ast.literal_eval(s)\n private_key = serialization.load_pem_private_key(\n pkey.encode('utf-8'),\n password=None,\n backend=default_backend()\n )\n\n dec = private_key.decrypt(\n enc_value,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return dec.decode()", "def decode_data ( data 
) :\n cipher = get_cipher( data )\n index = 0\n firstpass = []\n datalen = len( data )\n while index < datalen :\n if index % 2 == 0 :\n firstpass.append( chr( ord( data[ index ] ) - cipher ) )\n else :\n firstpass.append( chr( ord( data[ index ] ) + cipher ) )\n index += 1\n\n firstpass[ 0 ] = data[ 0 ]\n firstpass[ -1 ] = data[ -1 ]\n firstpass[ -2 ] = data[ -2 ]\n decoded_data = ''.join( firstpass )\n return base64.b64decode( decoded_data )", "def decrypt(event=None): # event is passed by binders.\n msg = inputText.get(\"1.0\",tkinter.END)\n outText.delete('1.0', tkinter.END)\n\n decB64Msg = base64.decodestring(msg)\n\n f = open(myTmpDir + 'ct' + str(identity) + '.bin','wb')\n f.write(decB64Msg)\n f.close()\n\n os.popen(\"rsa.exe d \" + myTmpDir + \"ct\" + str(identity) + \".bin \" + myTmpDir + \"ptSender\" + str(identity) + \".bin\")\n\n with open(myTmpDir + \"ptSender\" + str(identity) + \".bin\", \"rb\") as f:\n readFile = f.read()\n # Convert to hex representation\n decMsg = bytes(readFile)\n\n # TODO: overwirite\n outText.insert(tkinter.END, decMsg)", "def decrypt_data ( aes_key, data ) :\n decoded_data = decode_data( data )\n salt = decoded_data[ 0 : Crypto.Cipher.AES.block_size ]\n encrypted_data = decoded_data[ Crypto.Cipher.AES.block_size : ]\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n decrypted_data = cipher.decrypt( encrypted_data )\n\n return decrypted_data", "def rsa_decrypt(data, rsa_priv_key_str):\r\n key = RSA.importKey(rsa_priv_key_str)\r\n cipher = PKCS1_OAEP.new(key)\r\n return cipher.decrypt(data)", "def _decrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n return cipher.decrypt(data)", "def decrypt(cls, ciphertext_and_tag, aad, key, iv):", "def Decrypt(self, data):\n\n data = base64.b64decode(data)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n solved = \"\"\n try:\n solved = es.decrypt(data)\n except ValueError:\n stdout.write(\"Error, corrupted file.\\n\\n\")\n return \"%errorpass:1234123412341234%\"\n\n return solved", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "async def decrypt(self, data, sequence_no, direction='init', auth_data=None):\n\t\tedata = data[16:]\n\t\tsrv_sig = NTLMSSP_MESSAGE_SIGNATURE.from_bytes(data[:16])\n\t\tsealedMessage = self.crypthandle_server.encrypt(edata)\n\t\tsignature = self.MAC(self.crypthandle_server.encrypt, self.SignKey_server, srv_sig.SeqNum, sealedMessage)\n\t\t#print('seqno %s' % sequence_no)\n\t\t#print('Srv sig: %s' % data[:16])\n\t\t#print('Calc sig: %s' % signature)\n\n\t\treturn sealedMessage, None", "def base64_aes_decrypt(self,data,key):\n cipher = AES.new(key)\n try:\n return self._depkcs7padding(cipher.decrypt(base64.b64decode(data)))\n except Exception, ex:\n return ''", "def decrypt(data):\n # Decrypt data if necessary\n result = None\n if str(data[:5]) == \"<?xml\":\n print(\" - Unprotected CETRAINER detected\")\n result = data\n else:\n print(\" - Protected CETRAINER detected. 
Decrypting...\")\n ckey = 0xCE\n for i in range(2, len(data)):\n data[i] = data[i] ^ data[i-2]\n for i in range(len(data)-2, -1, -1):\n data[i] = data[i] ^ data[i+1]\n for i in range(0, len(data)):\n data[i] = data[i] ^ ckey\n ckey = (ckey + 1) & 0xFF\n\n # Decompress if necessary and write data\n if data[:5] == b'CHEAT':\n result = zlib.decompress(data[5:], -15)\n result = result[4:]\n print(\" - Decompressed CETRAINER using new method\")\n else:\n result = zlib.decompress(data, -15)\n print(\" - Decompressed CETRAINER using old method\")\n return result", "def decrypt(algorithm, key, encrypted_data, associated_data):\n decryptor = Decryptor(algorithm, key, associated_data, encrypted_data.iv, encrypted_data.tag)\n return decryptor.update(encrypted_data.ciphertext) + decryptor.finalize()", "def _decrypt(self, msg):\r\n # they must be real crypto experts at pubnub.com\r\n # two lines of code and two capital mistakes :-(\r\n # pylint: disable=E1101\r\n key = hashlib.sha256(self.cipher).hexdigest()[0:32]\r\n aes = AES.new(key, AES.MODE_CBC, \"0123456789012345\")\r\n decrypted = aes.decrypt(base64.decodestring(msg))\r\n return json.loads(decrypted[0:-ord(decrypted[-1])])", "def rsa_decrypt(self, thing):\n return self.true_private_key.decrypt(\n thing,\n cryptography.hazmat.primitives.asymmetric.padding.OAEP(\n mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(\n algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def base64_aes_decrypt(self,data,key):\r\n cipher = AES.new(key)\r\n return self._depkcs7padding(cipher.decrypt(base64.b64decode(data)))", "def decipher_raw(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n e = [decrypt(u[i], u[i + 1], key) for i in range(len(u))[::2]]\n return b''.join([struct.pack('2I', ee, ef) for ee, ef in e])", "def _unarmor_pem_openssl_private(headers, data, password):\n\n enc_algo = None\n enc_iv_hex = None\n enc_iv = None\n\n if 'DEK-Info' in headers:\n params = headers['DEK-Info']\n if params.find(',') != -1:\n enc_algo, enc_iv_hex = params.strip().split(',')\n else:\n enc_algo = 'RC4'\n\n if not enc_algo:\n return data\n\n if enc_iv_hex:\n enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii'))\n enc_algo = enc_algo.lower()\n\n enc_key_length = {\n 'aes-128-cbc': 16,\n 'aes-128': 16,\n 'aes-192-cbc': 24,\n 'aes-192': 24,\n 'aes-256-cbc': 32,\n 'aes-256': 32,\n 'rc4': 16,\n 'rc4-64': 8,\n 'rc4-40': 5,\n 'rc2-64-cbc': 8,\n 'rc2-40-cbc': 5,\n 'rc2-cbc': 16,\n 'rc2': 16,\n 'des-ede3-cbc': 24,\n 'des-ede3': 24,\n 'des3': 24,\n 'des-ede-cbc': 16,\n 'des-cbc': 8,\n 'des': 8,\n }[enc_algo]\n\n enc_key = hashlib.md5(password + enc_iv[0:8]).digest()\n while enc_key_length > len(enc_key):\n enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest()\n enc_key = enc_key[0:enc_key_length]\n\n enc_algo_name = {\n 'aes-128-cbc': 'aes',\n 'aes-128': 'aes',\n 'aes-192-cbc': 'aes',\n 'aes-192': 'aes',\n 'aes-256-cbc': 'aes',\n 'aes-256': 'aes',\n 'rc4': 'rc4',\n 'rc4-64': 'rc4',\n 'rc4-40': 'rc4',\n 'rc2-64-cbc': 'rc2',\n 'rc2-40-cbc': 'rc2',\n 'rc2-cbc': 'rc2',\n 'rc2': 'rc2',\n 'des-ede3-cbc': 'tripledes',\n 'des-ede3': 'tripledes',\n 'des3': 'tripledes',\n 'des-ede-cbc': 'tripledes',\n 'des-cbc': 'des',\n 'des': 'des',\n }[enc_algo]\n decrypt_func = crypto_funcs[enc_algo_name]\n\n if enc_algo_name == 'rc4':\n return decrypt_func(enc_key, data)\n\n return decrypt_func(enc_key, data, enc_iv)", "def basic_decrypt() -> Response:\n APP.log.debug(\"Request:\")\n 
APP.log.debug(json.dumps(APP.current_request.to_dict()))\n APP.log.debug(\"Ciphertext:\")\n APP.log.debug(APP.current_request.raw_body)\n\n try:\n # The decrypt oracle needs to be able to decrypt any message\n # it does not encrypt messages for anyone.\n client = aws_encryption_sdk.EncryptionSDKClient(commitment_policy=CommitmentPolicy.FORBID_ENCRYPT_ALLOW_DECRYPT)\n ciphertext = APP.current_request.raw_body\n plaintext, _header = client.decrypt(source=ciphertext, key_provider=_master_key_provider())\n APP.log.debug(\"Plaintext:\")\n APP.log.debug(plaintext)\n response = Response(body=plaintext, headers={\"Content-Type\": \"application/octet-stream\"}, status_code=200)\n except Exception as error: # pylint: disable=broad-except\n response = Response(body=str(error), status_code=400)\n\n APP.log.debug(\"Response:\")\n APP.log.debug(json.dumps(response.to_dict(binary_types=[\"application/octet-stream\"])))\n return response", "def decrypt_data(self, encrypted_data):\n from django.core.signing import loads\n return loads(encrypted_data, salt=self.salt_namespace)", "def decrypt(self, buffer):\n try:\n ct = base64.b64decode(buffer)\n except:\n print('f a i l')\n return bytes('fail')\n\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n pt = unpad(cipher.decrypt(ct), AES.block_size)\n return pt", "def ecb_decrypt(self, encrypted_data, color):\n msg = b''\n for d in encrypted_data:\n encoded_bytes = d[0] + d[1]\n encoded_int = self.bytes_to_int(encoded_bytes)\n decoded_int = self.power(encoded_int, self.private_key, self.N)\n decoded_byte = self.int_to_bytes(decoded_int, len(d[0]))\n msg += decoded_byte\n return msg", "def decryptByteArray(self, data, keyobj):\n raise NotImplementedError(\"Is abstract\")", "def decrypt_raw(self, key, data):\n iv = data[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.decrypt(data[AES.block_size:])\n return self.__unpad(data)", "def decryptToString(self, data, keyobj):\n return self.decryptByteArray(data, keyobj).decode().split('\\x00')[0]", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def decrypt(self, encrypted):\n\n encrypted = base64.b64decode(encrypted)\n IV = encrypted[:self.BLOCK_SIZE]\n aes = AES.new(self.key, AES.MODE_CBC, IV)\n return self._unpad(aes.decrypt(encrypted[self.BLOCK_SIZE:]))", "def Decrypt(self, input_bytes):\n ciph_bytes = input_bytes[keyczar.HEADER_SIZE:]\n decrypted = self.key.decrypt(ciph_bytes)\n return self.__Decode(decrypted)", "def decrypt(self, key, value):\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n iv = value[:16]\n crypted = value[16:]\n cipher = AES.new(key,AES.MODE_CBC,iv)\n return self.pkcs5_unpad(cipher.decrypt(crypted))", "def decrypt(self, encrypted_token: bytes) -> bytes:\n return None", "def aes_decrypt(encrypted_data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = cipher.decrypt(encrypted_data)\r\n return unpad(padded_data)", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def decrypt_message(data,symetric_key,private_key):\n\tif type(data) == str or type(data) == bytes:\n\t\tdata = json.loads(data)\n\ttyp = data['type']\n\tnonce = data['nonce'].encode(\"iso-8859-1\")\n\tmessage = data['message'].encode(\"iso-8859-1\")\n\tnonce, *_ = decrypt(private_key,nonce)\n\tmessage = 
AESCCM(symetric_key).decrypt(nonce,message,None)\n\tmessage ={'type':typ,'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\treturn message", "def decode(data): #@NoSelf", "def decrypt(self, enc):\n\n enc = base64.b64decode(enc)\n iv = enc[:AES.block_size]\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')", "def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def extract(self, data):\n return ujson.loads(self.cipher.decrypt(data))", "def decode_and_decrypt(encoded_data, key):\r\n return aes_decrypt(base64.urlsafe_b64decode(encoded_data), key)", "def decrypt(self, message: bytearray) -> bytearray:\n return self.__PRGA(message)", "def decrypt(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.decrypt(msg).data", "def decryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n decoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code-key\n new = chr(change)\n string += new\n key += key_increment\n decoded = ''.join(string)\n return ('Decoded Message:\\t' + decoded)", "def decrypt_data(encryption_key, data, iv=None):\n if not data:\n logger.debug(\"Outdata is empty, nothing to decrypt\")\n return data\n # if iv is None the it's assumed that 12 bytes iv is\n # prepended in encrypted data\n data_byte = base64_to_byte_array(data)\n if iv is None:\n iv_length = IV_SIZE\n iv = data_byte[:iv_length]\n data_contains_iv = True\n else:\n iv_length = len(iv)\n data_contains_iv = False\n\n cipher = AES.new(encryption_key, AES.MODE_GCM, iv)\n # Split data into iv, tag and ciphered data\n if data_contains_iv:\n ciphertext_len = len(data_byte) - iv_length - TAG_SIZE\n ciphered_data = data_byte[iv_length: iv_length + ciphertext_len]\n tag = data_byte[-TAG_SIZE:]\n else:\n ciphertext_len = len(data_byte) - TAG_SIZE\n ciphered_data = data_byte[: ciphertext_len]\n tag = data_byte[-TAG_SIZE:]\n\n result = cipher.decrypt_and_verify(ciphered_data, tag).decode(\"utf-8\")\n logger.info(\"Decryption result at client - %s\", result)\n return result", "def read(self, chars=-1):\n self.__CheckOpen('read')\n is_data_avail = True\n if not self.__key:\n is_data_avail = self.__CreateKey()\n\n if is_data_avail and self.__key and not self.__cipher:\n is_data_avail = self.__CreateCipher()\n\n if is_data_avail and self.__key and self.__cipher:\n data_to_decrypt = b''\n need_more_data = True\n while need_more_data:\n read_bytes, is_data_avail = self.__ReadBytes(\n self.__key.block_size,\n block=False)\n if read_bytes:\n self.__encrypted_buffer += read_bytes\n\n reserved_data_len = util.HLEN\n if is_data_avail:\n reserved_data_len += self.__key.block_size\n\n available_data = self.__encrypted_buffer[:-reserved_data_len]\n\n if is_data_avail:\n no_decrypt_len = len(available_data) % self.__key.block_size\n else:\n no_decrypt_len = 0\n # slicing with [:-0] does not work!\n if no_decrypt_len:\n data_to_decrypt = available_data[:-no_decrypt_len]\n else:\n data_to_decrypt = available_data\n\n need_more_data = (is_data_avail and not data_to_decrypt)\n\n if data_to_decrypt:\n self.__hmac_stream.Update(data_to_decrypt)\n self.__encrypted_buffer = self.__encrypted_buffer[len(\n data_to_decrypt):]\n decrypted_data = self.__cipher.decrypt(data_to_decrypt)\n\n if not is_data_avail:\n if len(self.__encrypted_buffer) != util.HLEN:\n raise 
errors.ShortCiphertextError(\n len(self.__encrypted_buffer))\n current_sig_bytes = self.__hmac_stream.Sign()\n msg_sig_bytes = self.__encrypted_buffer\n self.__encrypted_buffer = b''\n # it is important to verify mac before unpadding plaintext\n if not self.__key.hmac_key.VerifySignedData(\n current_sig_bytes, msg_sig_bytes):\n raise errors.InvalidSignatureError()\n decrypted_data = self.__key._UnPad(decrypted_data)\n\n self.__decrypted_buffer += decrypted_data\n\n if chars < 0:\n result = self.__decrypted_buffer\n self.__decrypted_buffer = b''\n else:\n result = self.__decrypted_buffer[:chars]\n self.__decrypted_buffer = self.__decrypted_buffer[chars:]\n\n if not result and is_data_avail:\n result = None\n\n return result", "def decrypt(self, key, encrypted):\n output = []\n padded_key = padd_key(key, encrypted)\n for i in range(len(encrypted)):\n dec_ascii = (ord(encrypted[i]) - ord(padded_key[i])) % 256\n output.append(chr(dec_ascii))\n return ''.join(output)", "def decrypt(self, b):\n decrypted = self.__aes.ecbDecrypt(b)\n return unpadPkcs7(decrypted, 16)", "def Decrypt(self, input_bytes):\n data_bytes = input_bytes[keyczar.HEADER_SIZE:] # remove header\n if len(data_bytes) < self.block_size + util.HLEN: # IV + sig\n raise errors.ShortCiphertextError(len(data_bytes))\n\n iv_bytes = data_bytes[:self.block_size] # first block of bytes is the IV\n ciph_bytes = data_bytes[self.block_size:-util.HLEN]\n sig_bytes = data_bytes[-util.HLEN:] # last 20 bytes are sig\n if not self.hmac_key.Verify(input_bytes[:-util.HLEN], sig_bytes):\n raise errors.InvalidSignatureError()\n\n plain = AES.new(self.key_bytes, AES.MODE_CBC, iv_bytes).decrypt(ciph_bytes)\n return self.__UnPad(plain)", "def _decode_data(self, data):\r\n return data.decode('ISO-8859-1')", "def decrypt_attr(data, key):\n data = MegaCrypto.base64_decode(data)\n k, iv, meta_mac = MegaCrypto.get_cipher_key(key)\n attr = MegaCrypto.cbc_decrypt(data, k)\n\n #: Data is padded, 0-bytes must be stripped\n return json.loads(\n re.search(r'{.+?}', attr).group(0)) if attr[:6] == 'MEGA{\"' else False", "def decrypt(self, encrypted: str) -> str: # type: ignore\n passphrase = self.passphrase\n encrypted = base64.b64decode(encrypted) # type: ignore\n assert encrypted[0:8] == b\"Salted__\"\n salt = encrypted[8:16]\n key_iv = self.bytes_to_key(passphrase.encode(), salt, 32 + 16)\n key = key_iv[:32]\n iv = key_iv[32:]\n aes = AES.new(key, AES.MODE_CBC, iv)\n try:\n return self.unpad(aes.decrypt(encrypted[16:])).decode() # type: ignore\n except UnicodeDecodeError:\n raise ValueError(\"Wrong passphrase\")", "def decryptData(self, key, iv, data, align = True):\r\n\t\tif((len(data) % self.align) != 0 and align):\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).decrypt(data + (\"\\x00\" * (self.align - (len(data) % self.align))))\r\n\t\telse:\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).decrypt(data)", "def decrypt_kms_data(encrypted_data):\n if not AWS_REGION:\n return\n\n kms = boto3.client('kms', region_name=AWS_REGION)\n\n decrypted = kms.decrypt(CiphertextBlob=encrypted_data)\n\n if decrypted.get('KeyId'):\n # Decryption succeed\n decrypted_value = decrypted.get('Plaintext', '')\n if isinstance(decrypted_value, bytes):\n decrypted_value = decrypted_value.decode('utf-8')\n return decrypted_value", "def decrypt_data_key(self, dataKeyCypher, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKeyCypher, str):\n dataKeyCypher = dataKeyCypher.encode('cp855')\n 
try:\n plainText = box.decrypt(dataKeyCypher).decode('utf-8')\n except Exception:\n raise UnableToDecryptException(\"Unable to verify cyphertext/key pair\")\n return plainText", "def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def asym_dec(self, ciph, keyfile):\n ciph = ciph.split('\\0')\n ciphkey_len = int(ciph[0])\n ciph = '\\0'.join(ciph[1:])\n ciphkey = ciph[:ciphkey_len]\n ciph = ciph[ciphkey_len:]\n\n passphrase = xsystem([self.sslname, 'rsautl', '-decrypt', '-inkey',\n keyfile], ciphkey)\n if not passphrase:\n warning('keymanagement: Unable to perform asymmetric decryption\\n')\n return None\n\n return self.sym_dec(ciph, passphrase)", "def decrypt(self, cryptod, secret):\n try:\n # From json to python crypto dict\n data = base64.b64decode(\n bytes(cryptod['ciphervalue'], encoding=self.encoding))\n # Decrypt\n iv = base64.b64decode(bytes(cryptod['iv'], encoding=self.encoding))\n algorithm = self._algorithm(\n secret=secret, name=cryptod['algorithm'])\n cipher = Cipher(algorithm, modes.CBC(iv), backend=self.backend)\n decryptor = cipher.decryptor()\n data = decryptor.update(data) + decryptor.finalize()\n # Unpad\n unpadder = padding.PKCS7(cipher.algorithm.block_size).unpadder()\n data = unpadder.update(data) + unpadder.finalize()\n # Unzip\n data = str(gzip.decompress(data), encoding=self.encoding)\n cipher = None\n # json string\n except ValueError as ve:\n raise ValueError('Decrypt failure!') from ve\n try:\n data = json.loads(data)\n except ValueError as ve:\n raise ValueError('JSON formatting failure!') from ve\n return data", "def decrypt_device_line(self, base64_data: bytes) -> bytes:\r\n # this can fail if the line is missing or has extra :'s, the case is handled as line error\r\n iv, base64_data = base64_data.split(b\":\")\r\n iv = decode_base64(iv)\r\n raw_data = decode_base64(base64_data)\r\n \r\n # handle cases of no data, and less than 16 bytes of data, which is an equivalent scenario.\r\n if not raw_data or len(raw_data) < 16:\r\n raise InvalidData()\r\n if not iv or len(iv) < 16:\r\n raise InvalidIV()\r\n \r\n # CBC data encryption requires alignment to a 16 bytes, we lose any data that overflows that length.\r\n overflow_bytes = len(raw_data) % 16\r\n \r\n if overflow_bytes:\r\n # print(\"\\n\\nFOUND OVERFLOWED DATA\\n\\n\")\r\n # print(\"device os:\", self.participant.os_type)\r\n # print(\"\\n\\n\")\r\n raw_data = raw_data[:-overflow_bytes]\r\n \r\n try:\r\n decipherer = AES.new(self.aes_decryption_key, mode=AES.MODE_CBC, IV=iv)\r\n decrypted = decipherer.decrypt(raw_data)\r\n except Exception:\r\n if iv is None:\r\n len_iv = \"None\"\r\n else:\r\n len_iv = len(iv)\r\n if raw_data is None:\r\n len_data = \"None\"\r\n else:\r\n len_data = len(raw_data)\r\n if self.aes_decryption_key is None:\r\n len_key = \"None\"\r\n else:\r\n len_key = len(self.aes_decryption_key)\r\n # these print statements cause problems in getting encryption errors because the print\r\n # statement will print to an ascii formatted log file on the server, which causes\r\n # ascii encoding error. Enable them for debugging only. (leave uncommented for Sentry.)\r\n # print(\"length iv: %s, length data: %s, length key: %s\" % (len_iv, len_data, len_key))\r\n # print('%s %s %s' % (patient_id, key, orig_data))\r\n raise\r\n \r\n # PKCS5 Padding: The last byte of the byte-string contains the number of bytes at the end of the\r\n # bytestring that are padding. 
As string slicing in python are a copy operation we will\r\n # detect the fast-path case of no change so that we can skip it\r\n num_padding_bytes = decrypted[-1]\r\n if num_padding_bytes:\r\n decrypted = decrypted[0: -num_padding_bytes]\r\n return decrypted", "def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')", "def decrypt(self):\n # Grab the initialization vector from the front of the cipher-text\n iv = self.ciphertext[:AES.block_size]\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return cipher.decrypt(self.ciphertext)[AES.block_size:].rstrip().decode(\"utf-8\"), iv", "def decrypt(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv=iv)\n return decryptor.decrypt(data)", "def unpack(\n data: bytes,\n crypto: AuxiliaryStreamCrypto,\n client_data: bool = False\n) -> bytes:\n # Split header from rest of data\n header, payload, hmac = data[:4], data[4:-32], data[-32:]\n\n parsed = aux_header_struct.parse(header)\n\n if not crypto.verify(header + payload, hmac):\n raise AuxiliaryPackerException('Hash verification failed')\n\n if not client_data:\n plaintext = crypto.decrypt(payload)\n else:\n plaintext = crypto.decrypt_client(payload)\n\n # Cut off padding, before returning\n return plaintext[:parsed.payload_size]", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def decrypt(self, msg):\n if self.security_type is not None and self.security_type != 0:\n res, used, _ = gss.unwrap(self.ctx, msg)\n isconf = self.security_type == gss.RequirementFlag.confidentiality\n if (not used and isconf):\n raise GSSClientError('User requested encryption, '\n 'but the server sent an unencrypted '\n 'message!')\n return res.decode('utf-8')\n else:\n return msg.decode('utf-8')", "def decrypt(self, enc, use_base64=True, decode_text=True):\n if use_base64:\n enc = base64.b64decode(enc)\n\n decryptor = self.cipher.decryptor()\n raw = self._unpad(decryptor.update(enc) + decryptor.finalize())\n return raw.decode(\"utf-8\") if decode_text else raw", "def decrypt(self, cypher):\n\n cypher = b64decode(cypher)\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def decrypt(self, key, data, mode, padding):\n # pylint: disable=unused-argument,no-self-use\n if hasattr(key, \"public_bytes\"):\n raise NotImplementedError('\"decrypt\" is not supported by public keys')\n try:\n return key.decrypt(data, padding.build())\n except Exception:\n error_message = \"Decryption failed\"\n _LOGGER.exception(error_message)\n raise DecryptionError(error_message)", "def decrypt(data, private_key):\r\n\r\n # Retrieve session key, tag, ciphertext and nonce from file\r\n enc_session_key, nonce, tag, ciphertext = \\\r\n [ file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1) ]\r\n\r\n\r\n # Decrypt the session key\r\n session_key = cipher_rsa.decrypt(enc_session_key)\r\n\r\n # Decrypt the data with the AES session key\r\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\r\n data = cipher_aes.decrypt_and_verify(ciphertext, tag)\r\n\r\n return data", "def decrypt(self, 
cipher_text, iv=\"\", auth_data=None, tag=b\"\"):\n if not iv:\n raise ValueError(\"Missing Nonce\")\n\n return self.key.decrypt(iv, cipher_text + tag, auth_data)", "def decrypt(self, cipherText, additionalData=''):\n # warning only valid in the random oracle\n mac_key = sha2(b'Poor Mans Key Extractor'+self._key).digest()\n mac = MessageAuthenticator(mac_key)\n if not mac.verify(cipherText, additionalData=additionalData):\n raise ValueError(\"Invalid mac. Your data was tampered with or your key is wrong\")\n else:\n return super(AuthenticatedCryptoAbstraction, self).decrypt(cipherText['msg'])", "def decode(self, data: bytes) -> bytes:\n ...", "def _decrypt_string(self, event):\n _LOGGER.debug(\"Hub: Decrypt String: Original: %s\", str(event.encrypted_content))\n resmsg = self._decrypter.decrypt(unhexlify(event.encrypted_content)).decode(\n encoding=\"UTF-8\", errors=\"replace\"\n )\n _LOGGER.debug(\"Hub: Decrypt String: Decrypted: %s\", resmsg)\n event.parse_decrypted(resmsg)", "def decrypt(cipher):\n setup()\n\n # Read in p, q, and d from the private key file\n ifp = open(\"private.rsa\")\n private = ifp.readlines()\n d = int(private[-1])\n\n # Read in n from the public key file\n ifp = open(\"public.rsa\")\n public = ifp.readlines()\n n = int(public[-1])\n\n # Compute c^d(mod n)\n m = str(pow(long(cipher), d, n))\n\n # Convert back to alphabets\n if len(m) % 2:\n m = '0' + m\n plaintext = ''\n for i in range(0, len(m), 2):\n plaintext += chr(int(m[i:i+2]) - 1 + ord('a'))\n\n return plaintext", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def dh_decrypt(priv, ciphertext):\n Group1,private, public = dh_get_key()#generate new DH pair for Bob\n iv=ciphertext[0]\n cipher=ciphertext[1]\n tag=ciphertext[2]\n pubA=ciphertext[3]\n \n #Bob derives shared secret key by multiplying his public key with Alice's private key\n shared2 = pubA.pt_mul(priv)#qA * dB\n print \"key from dec is\", shared2\n\n hashedKey=sha256(shared2.export()).digest()\n \n aes = Cipher(\"aes-128-gcm\")\n plain = aes.quick_gcm_dec(hashedKey[:16], iv, cipher, tag)#where to get IV and tag from ???\n \n return plain.encode(\"utf8\")", "def rc4_decode(data, key, decode=base64.b64decode, salt_length=16):\n if decode:\n data = decode(data)\n salt = data[:salt_length]\n return crypt(data[salt_length:], sha1(key + salt).digest())", "def decrypt_message(message: bytes, receiver_private_key: RsaKey) -> bytes:\n iv = message[:IV_LEN]\n enc_aes_key = message[IV_LEN:IV_LEN + receiver_private_key.size_in_bytes()] # Assume encryption has been done with same key size\n enc_message = message[IV_LEN + receiver_private_key.size_in_bytes():]\n\n cipher_rsa = PKCS1_OAEP.new(receiver_private_key)\n aes_key = cipher_rsa.decrypt(enc_aes_key)\n\n cipher_aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(cipher_aes.decrypt(enc_message), AES.block_size) # Padding have to be removed", "def decrypt_string(self, encrypted_string):\n return self.fernet_instance.decrypt(encrypted_string.encode('utf-8')).decode('utf-8')", "def decipher_raw2(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n stringa = str(b'\\xff\\xd8\\xff').replace('\\'', '')\n for i in range(len(u))[::2]:\n e = [decrypt2(u[i], u[i + 1], key)]\n i = b''.join([struct.pack('2I', ee, ef) for ee, ef in e])\n\n prova = 
str(i).replace('\\'', '')\n\n #lel = prova.find(stringa)\n\n if prova.find(stringa) != -1:\n print(\"detect format file: JPG\")\n return 0\n else:\n return 1", "def decrypt(self, key, msg, b64decode=True):\n if b64decode:\n msg = base64.b64decode(msg)\n iv = msg[:self.cipher.block_size]\n cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)\n\n padded = cipher.decrypt(msg[self.cipher.block_size:])\n l = ord(padded[-1:]) + 1\n plain = padded[:-l]\n return plain", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def _decode_encrypted_part(self, value):\n\n return encoding_utils.base64_to_bytes(value)", "def heat_decrypt(value, encryption_key=None):\n encryption_key = get_valid_encryption_key(encryption_key)\n auth = base64.b64decode(value)\n iv = auth[:AES.block_size]\n cipher = AES.new(encryption_key, AES.MODE_CFB, iv)\n res = cipher.decrypt(auth[AES.block_size:])\n return res", "def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted", "def decode(self, data):\n encoding = getattr(self, 'encoding', 'ascii')\n return data.decode(encoding, 'ignore')", "def decrypt(self, text):\n return self.encrypt(text)" ]
[ "0.76379395", "0.72864676", "0.681302", "0.6796825", "0.67245364", "0.67193043", "0.66916806", "0.66666394", "0.6586855", "0.6586855", "0.6545387", "0.6543017", "0.64966065", "0.6484453", "0.6446464", "0.643858", "0.6381135", "0.6379265", "0.63670003", "0.63490975", "0.63311183", "0.62581235", "0.6209814", "0.6204261", "0.6180791", "0.61614555", "0.61368495", "0.61284804", "0.6120521", "0.6113725", "0.61031115", "0.6097044", "0.609176", "0.6060359", "0.6038271", "0.6036855", "0.6035785", "0.60345966", "0.6030728", "0.6027705", "0.5996813", "0.5993362", "0.59840685", "0.5980172", "0.59766805", "0.5973923", "0.5968384", "0.5948311", "0.59467185", "0.5936892", "0.59083843", "0.5904231", "0.590388", "0.5893672", "0.58880746", "0.588385", "0.5842384", "0.58316076", "0.5825708", "0.5817537", "0.58166975", "0.58146435", "0.58081174", "0.57993203", "0.57968616", "0.5793388", "0.5784978", "0.5781614", "0.5778303", "0.57602346", "0.5753137", "0.5737263", "0.5734143", "0.5702788", "0.5694897", "0.5693294", "0.5689492", "0.56804943", "0.5672836", "0.5672725", "0.56717724", "0.56602603", "0.5658928", "0.5658537", "0.5655391", "0.5654538", "0.5647982", "0.56467706", "0.5641713", "0.5640001", "0.5639733", "0.56306696", "0.5630542", "0.560334", "0.5599884", "0.5595278", "0.55948424", "0.5591332", "0.5578481", "0.55780196" ]
0.5746602
71
Make a model where each trial has its own regressor using least squares all (LSA)
def _lsa_events_converter(events_file):
    import pandas as pd
    events = pd.read_csv(events_file, sep='\t')
    events['original_trial_type'] = events['trial_type']
    for cond, cond_df in events.groupby('trial_type'):
        cond_idx = cond_df.index
        for i_trial, trial_idx in enumerate(cond_idx):
            trial_name = '{0}_{1:04d}'.format(cond, i_trial+1)
            events.loc[trial_idx, 'trial_type'] = trial_name
    return events
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimize(self, trial):\r\n num_leaves = trial.suggest_int(\"num_leaves\", 6, 50)\r\n min_child_samples = trial.suggest_int(\"min_child_samples\", 100, 500)\r\n min_child_weight = trial.suggest_uniform(\"min_child_weight\", 1, 7)\r\n subsample = trial.suggest_uniform(\"subsample\", 0.6, 1)\r\n colsample_bytree = trial.suggest_uniform(\"colsample_bytree\", 0.6, 1)\r\n reg_alpha = trial.suggest_uniform(\"reg_alpha\", 0.1, 100)\r\n reg_lambda = trial.suggest_uniform(\"reg_lambda\", 0.1, 100)\r\n\r\n model = LGBMRegressor(\r\n num_leaves=num_leaves,\r\n min_child_samples=min_child_samples,\r\n min_child_weight=min_child_weight,\r\n subsample=subsample,\r\n colsample_bytree=colsample_bytree,\r\n reg_alpha=reg_alpha,\r\n reg_lambda=reg_lambda,\r\n )\r\n\r\n model = ModelTrainer(file_object=self.file_object).get_trained_model(\r\n model, self.X_train, self.y_train\r\n )\r\n r_squared, rmse = ModelScorer(file_object=self.file_object).get_model_scores(\r\n model, self.X_test, self.y_test\r\n )\r\n\r\n return r_squared", "def get_model( ):\n\n return Lasso(alpha = 1e-3, fit_intercept = True, precompute = True, max_iter = 1e4)", "def generate_models(R, u_t, inverse_transform, algo):\n model_list = []\n it_max = 10000 # maximum number of iterations after which the Lasso and SR3 are stopped to save computational time\n # in our experience, if the model converges at all, this is usually far sooner than 10000 iterations\n tol_iterativ = 10 * np.finfo(float).eps # convergence tolerance of SR3 and Lasso\n if algo == 'FoBa':\n log_epsilon_range = np.arange(-15., 15., 0.5)\n for log_epsilon in log_epsilon_range:\n w = FoBa(R, u_t, epsilon=10 ** log_epsilon, backwards_freq=1, maxit_f=20)\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'Lasso':\n log_lambda_range = np.arange(-15., 15., 0.5) # l1 factor\n for log_lambda in log_lambda_range:\n # initialize Lasso model\n clf = linear_model.Lasso(alpha=10**log_lambda, copy_X=True, fit_intercept=True, max_iter=it_max,\n normalize=False, positive=False, precompute=False, random_state=None,\n selection='cyclic', tol=tol_iterativ, warm_start=False)\n clf.fit(R, u_t) # fit model\n w = clf.coef_\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'STRidge':\n log_lambda_range = np.arange(-15, 15., 1.) # l2 factor (Ridge)\n log_tol_range = np.arange(-16, 10., 1.)\n for log_lambda in log_lambda_range:\n for log_tol in log_tol_range:\n w = STRidge(R, u_t, maxit=1000, lam=10**log_lambda, tol=10**log_tol, normalize=2)\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'SR3':\n # Uses python-matlab interface to directly use the original SR3 implementation.\n # Note that setting up the interface can be a bit tricky; if setting up the interface is too much effort,\n # just leave SR3 out of the 'algo_list' in the SITE file.\n t_sr3_start = time.time()\n eng = matlab.engine.start_matlab()\n eng.setup_matlab(nargout=0)\n log_lambda_range = np.arange(-15, 15., 1.) 
# l1 factor\n log_kappa_range = np.arange(-5, 6., 1.)\n for log_kappa in log_kappa_range:\n for log_lambda in log_lambda_range:\n R_matlab = matlab.double(R.tolist())\n u_t_matlab = matlab.double(u_t.tolist())\n # iters can be used to check if model converged or it_max was reached\n x, w, iters = eng.sr3(R_matlab, u_t_matlab, 'mode', '0', 'kap', (10**log_kappa).item(), 'lam',\n (10**log_lambda).item(), 'itm', it_max, 'tol', tol_iterativ.item(), 'ptf',\n 45000, nargout=3)\n w = np.asarray(w)\n initialize_model(w, model_list, algo, inverse_transform)\n eng.quit()\n print('Time for evaluation SR3: ', time.time() - t_sr3_start)\n\n else: raise ('The algorithm ' + str(algo) + ' is not implemented! (or a typo)')\n return model_list", "def experiment_models(train, test, train_target, test_target):\n # Linear models\n linear_models = [(LinearRegression, {\"n_jobs\": -1}),\n (Lasso, {\"alpha\": 3}),\n (Ridge, {\"alpha\": 3}),\n (LinearSVR, {\"random_state\": 0, \"tol\": 1e-5})]\n\n # Add polynomial features\n poly = preprocessing.PolynomialFeatures(2)\n\n # scaler\n scaler = preprocessing.StandardScaler().fit(train)\n\n print(\"Use linear models with linear features\")\n for model_ in linear_models:\n scaled_train = scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")\n\n print(\"Use linear models with polynomial features\")\n train = poly.fit_transform(train)\n test = poly.transform(test)\n scaler = preprocessing.StandardScaler().fit(train)\n for model_ in linear_models:\n scaled_train = scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")", "def train_model(X_train, y_train):\n rgs = linear_model.Lasso(alpha=0.1)\n rgs.fit(X_train, y_train)\n return rgs", "def ModelRegression():\n \n fs=125\n win_len = 10\n win_shift = 2\n \n # load the data file\n data_fls, ref_fls = LoadTroikaDataset()\n targets, features, sigs, subs = [], [], [], []\n for data_fl, ref_fl in (zip(data_fls, ref_fls)):\n \n # load the signal\n sig = LoadTroikaDataFile(data_fl)\n ref = LoadTroikaRefFile(ref_fl)\n ref = np.array([x[0] for x in ref])\n subject_name = os.path.basename(data_fl).split('.')[0] \n start_indxs, end_indxs = get_indxs(sig.shape[1], len(ref), fs, win_len,win_shift)\n for i, s in enumerate(start_indxs):\n start_i = start_indxs[i]\n end_i = end_indxs[i]\n\n ppg = sig[0, start_i:end_i] \n accx = sig[1, start_i:end_i]\n accy = sig[2, start_i:end_i]\n accz = sig[3, start_i:end_i]\n \n #band pass the channels\n ppg = BandpassFilter(ppg)\n accx = BandpassFilter(accx)\n accy = BandpassFilter(accy)\n accz = BandpassFilter(accz)\n \n # creates the features\n feature, ppg, accx, accy, accz = FeatureExtraction(ppg, accx, accy, accz)\n\n sigs.append([ppg, accx, accy, accz])\n targets.append(ref[i])\n features.append(feature)\n subs.append(subject_name)\n \n 
targets = np.array(targets)\n features = np.array(features)\n \n # set a Random Forest Regressor model\n #classifier = RandomForestClassifier(n_estimators=100,\n # max_depth=10,\n # random_state=42,\n # class_weight='balanced')\n \n regression = RandomForestRegressor(n_estimators=200,max_depth=10)\n \n lf = KFold(n_splits=5)\n splits = lf.split(features,targets,subs)\n \n # split the data and fit the model\n for i, (train_idx, test_idx) in enumerate(splits):\n X_train, y_train = features[train_idx], targets[train_idx]\n X_test, y_test = features[test_idx], targets[test_idx]\n regression.fit(X_train, y_train)\n \n return regression", "def nnRegression(data):", "def test_SLM():\n samples = 10\n predictors = 3\n\n grid = list(create_parameter_grid(samples, predictors))\n Y = np.random.rand(samples, 10242, predictors)\n\n for i in range(len(grid)):\n # Skip exceptions that we know error.\n if grid[i][\"surf\"] is None:\n if grid[i][\"correction\"] is not None and \"rft\" in grid[i][\"correction\"]:\n continue\n if grid[i][\"Y_idx\"] > 1 and grid[i][\"two_tailed\"] is False:\n continue\n\n try:\n slm = SLM(\n model=grid[i][\"model\"],\n contrast=grid[i][\"contrast\"],\n surf=grid[i][\"surf\"],\n mask=grid[i][\"mask\"],\n correction=grid[i][\"correction\"],\n two_tailed=grid[i][\"two_tailed\"],\n )\n slm.fit(Y[:, :, 0 : grid[i][\"Y_idx\"]])\n except Exception as e:\n print(\"Error on run:\", i)\n print(\"SLM failed with the following parameters:\")\n print(\"Model: \", grid[i][\"model\"])\n print(\"Contrast: \", grid[i][\"contrast\"])\n print(\"Surface: \", grid[i][\"surf\"])\n print(\"Mask: \", grid[i][\"mask\"])\n print(\"Correction: \", grid[i][\"correction\"])\n print(\"Two_tailed: \", grid[i][\"two_tailed\"])\n print(\"Y_idx: \", grid[i][\"Y_idx\"])\n raise e", "def best_fit(x, y, z, z_real, p = list(range(3, 15)), folds = 4, train = 0.7, seed = 42, n_lambda = 2001, n = 1, m = 1):\n lambdas = np.array([0] + np.logspace(-5.5, -1, n_lambda).tolist())\n polynomials = np.array(p)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n lambda_min_ridge = np.zeros(len(polynomials))\n lambda_min_lasso = np.zeros(len(polynomials))\n R2 = np.zeros((3, len(polynomials)))\n MSE = np.zeros((3, len(polynomials)))\n\n R2_data = np.zeros((3, len(polynomials)))\n MSE_data = np.zeros((3, len(polynomials)))\n\n\n for i in range(len(polynomials)):\n print(i + polynomials[0])\n ridge_sum = 0\n lasso_sum = 0\n model = regression(x, y, z, split = True, train = train, seed = seed, k = polynomials[i])\n z_test = np.ravel(np.copy(model.z_test))\n for j in range(n): #The mean of n times\n ridge_sum += model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True, n_lambda = n_lambda)[0]\n for j in range(m): #The mean of m times\n lasso_sum += model.lambda_best_fit(method = 'Lasso', fold = folds, n_lambda = n_lambda)[0]\n lambda_min_ridge[i] = ridge_sum/n\n lambda_min_lasso[i] = lasso_sum/m\n\n _,_, a, z_real_test = model.train_test(X = model.X_full, z = z_real, train = 0.7, seed = seed) #Both the training set and the test set for z_real in that order in list/tuple\n\n Beta_ols = model.OLS()\n Beta_ridge = model.Ridge(lam = lambda_min_ridge[i])\n Beta_lasso = model.Lasso(lam = lambda_min_lasso[i], max_iter = 1001)\n\n z_tilde_OLS = model.z_tilde(Beta_ols, X = model.X_test)\n z_tilde_Ridge = model.z_tilde(Beta_ridge, X = model.X_test)\n z_tilde_Lasso = model.z_tilde(Beta_lasso, X = model.X_test)\n\n R2[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_real_test)\n R2[1, i] = 
model.R_squared(z_tilde = z_tilde_Ridge, z = z_real_test)\n R2[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_real_test)\n\n MSE[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_real_test)\n MSE[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_real_test)\n MSE[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_real_test)\n\n R2_data[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_test)\n R2_data[1, i] = model.R_squared(z_tilde = z_tilde_Ridge, z = z_test)\n R2_data[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_test)\n\n MSE_data[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_test)\n MSE_data[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_test)\n MSE_data[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_test)\n\n _, _, lambdas = model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True)\n\n min_MSE = [[np.argmin(MSE[0]), np.argmin(MSE[1]), np.argmin(MSE[2])], [np.argmin(MSE_data[0]), np.argmin(MSE_data[1]), np.argmin(MSE_data[2])]]\n min_R2 = [[np.argmin(MSE[0]), np.argmin(MSE[1]), np.argmin(MSE[2])], [np.argmin(MSE_data[0]), np.argmin(MSE_data[1]), np.argmin(MSE_data[2])]]\n\n print('Minimum MSE with Frank, OLS: ', np.min(MSE[0]), ' Ridge: ', np.min(MSE[1]), ' Lasso: ', np.min(MSE[2]))\n print('With polynoms: ', np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Minimum MSE with Data, OLS: ', np.min(MSE_data[0]), ' Ridge: ', np.min(MSE_data[1]), ' Lasso: ', np.min(MSE_data[2]))\n print('With polynoms: ', np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Maximum R2 with Frank, OLS: ', np.max(R2[0]), ' Ridge: ', np.max(R2[1]), ' Lasso: ', np.max(R2[2]))\n print('With polynoms: ', np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Maximum R2 with Frank, OLS: ', np.max(R2_data[0]), ' Ridge: ', np.max(R2_data[1]), ' Lasso: ', np.max(R2_data[2]))\n print('With polynoms: ', np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n\n error_mins = np.array([[np.min(MSE[0]), np.min(MSE[1]), np.min(MSE[2])],\n [np.min(MSE_data[0]), np.min(MSE_data[1]), np.min(MSE_data[2])],\n [np.max(R2[0]), np.max(R2[1]) , np.max(R2[2])],\n [np.max(R2_data[0]), np.max(R2_data[1]), np.max(R2_data[2])],\n [np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0]],\n [np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0]],\n [np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0]],\n [np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0]]]).T\n\n text = ['MSE Franke', 'MSE Data','R\\(^2\\) Franke', 'R\\(^2\\) Data']\n print(latex_print(error_mins, text = text))\n\n print('Ridge lambda, lowest indexes for Franke: ', np.argmin(MSE[2]))\n print('Ridge lambda, lowest indexes 
for Data: ', np.argmin(MSE_data[2]))\n print(lambda_min_ridge)\n print('Lasso lambda, lowest indexes for Franke: ', np.argmin(MSE[2]))\n print('Lasso lambda, lowest indexes for Data: ', np.argmin(R2_MSE[2]))\n print(lambda_min_lasso)\n #Real Franke\n\n plt.plot(polynomials, R2[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, R2[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, R2[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('R2 error between the model and FrankeFunction', fontsize = 14)\n plt.ylabel('R2')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_poly.png')\n\n plt.show()\n\n plt.plot(polynomials, MSE[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, MSE[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, MSE[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('MSE for test data between the model and FrankeFunction', fontsize = 14)\n plt.ylabel('MSE')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE.png')\n\n plt.show()\n\n #Noise Franke\n\n plt.plot(polynomials, R2_data[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, R2_data[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, R2_data[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('R2 error between the model and data', fontsize = 14)\n plt.ylabel('R2')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_poly_data.png')\n\n plt.show()\n\n plt.plot(polynomials, MSE_data[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, MSE_data[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, MSE_data[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('MSE for test data between the model and data', fontsize = 14)\n plt.ylabel('MSE')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE_data.png')\n\n plt.show()\n\n #Polynomial and lambda\n\n plt.plot(polynomials, lambda_min_ridge, 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, lambda_min_lasso, 'go--', label = 'Lasso', color = 'green')\n\n plt.title('The \\'best\\' lambda pr polynomial')\n plt.ylabel('Lambda')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n plt.savefig(results_dir + 'ridge_lasso_lambda_poly.png')\n plt.show()", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def create_sts_model(train_x, train_y):\n model = GaussianNB()\n model.fit(train_x, train_y)\n save_model(model, \"simple_time_series\")\n return model", "def model():\n return TimeSeriesMultiReg()", "def nnls_fit(self):\n\n def sprod(a,b): #simplecting inner product between two Pauli operators\n return int(not a.commutes(b))\n\n F1 = [] #First list of terms\n F2 = [] #List of term pairs\n fidelities = [] # list of fidelities from fits\n\n for datum in self._term_data.values():\n F1.append(datum.pauli)\n fidelities.append(datum.fidelity)\n #If the Pauli is conjugate to another term in the model, a degeneracy is present\n if self._issingle(datum):\n F2.append(datum.pauli)\n else:\n pair = datum.pair\n F2.append(pair)\n\n #create commutativity matrices\n M1 = [[sprod(a,b) for a in F1] for b in F1]\n M2 = [[sprod(a,b) for a in F1] for b in 
F2]\n\n #check to make sure that there is no degeneracy\n if np.linalg.matrix_rank(np.add(M1,M2)) != len(F1):\n raise Exception(\"Matrix is not full rank, something went wrong!\")\n \n #perform least-squares estimate of model coefficients and return as noisemodel \n coeffs,_ = nnls(np.add(M1,M2), -np.log(fidelities)) \n self.noisemodel = NoiseModel(self.layer._cliff_layer, F1, coeffs)", "def linear_regression(x_train, t_train, basis, bias,reg_lambda=0, degree=1, mu=0, s=1):\n \n # Construct the design matrix.\n # Pass the required parameters to this function\n \n phi = design_matrix(x_train,basis,degree,bias,mu,s) \n #print(x_train.shape) \n # Learning Coefficients\n if reg_lambda > 0:\n I=np.identity((phi.shape[1]),dtype=int)\n inv = np.linalg.inv((reg_lambda*I)+(phi.T@phi))\n w = inv@(phi.T@t_train) \n # regularized regression\n else:\n # no regularization \n w = np.linalg.pinv(phi)@t_train\n \n pred_train=phi@w\n train_err = np.sqrt((np.square(pred_train-t_train)).mean())\n return (w, train_err)", "def get_linear_model():\n\n ss = StandardScaler()\n lr = LogisticRegression(penalty='l2', max_iter=1000, class_weight=None) # ridge\n\n lr_model = Pipeline(steps=(['scale', ss], ['clf', lr])) # pipeline\n\n lr_model_params = {\n 'clf__C':loguniform(1e-3,1e3)\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV\n linear_model = RandomizedSearchCV(lr_model, lr_model_params, n_iter=100, cv=3)\n\n return clone(linear_model)", "def lin_reg():\n \n year = 2013\n \n # import temperature and ridership data\n data_array = process_data()\n \n # select month, day, hour, temperature, precipitation, and snow data from data_array\n X = data_array[:,[1,2,3]]\n # select ridership data from data_array\n Y = data_array[:,4]\n\n # make array vertical so that scikit-learn can process it\n X = X.reshape(X.shape[0], -1)\n Y = Y.reshape(Y.shape[0], -1)\n\n # splits data into training and testing bits\n X_train, X_test, y_train, y_test = train_test_split(X, Y, train_size=0.5)\n \n # sets degree of polynomial regression\n # in testing, anything greater than 7 will give a MemoryError\n degrees = 7\n\n # initalize scikit-learn model\n model = make_pipeline(PolynomialFeatures(degrees), Ridge())\n\n # fits a model to training data\n print 'fitting model...'\n model.fit(X_train, y_train)\n\n # scores model\n print \"Year %d, %d degree polynomial regression\" % (year, degrees)\n print \"Train R^2 %f\"%model.score(X_train, y_train)\n print \"Test R^2 %f\"%model.score(X_test, y_test)\n\n # pickles and saves model\n pickle.dump(model, open('LargeDataStorage/mlModelNoWeather', 'wb'))\n pass", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if 
available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def LinReg_fit(X, y, X_test=None, y_test=None, lr=1e-7, batch=1, lamb=0,\n epoch=10000, print_every=100, lamb1=0, momentum=0):\n # initialize\n W = np.random.randn(X.shape[1]) / X.shape[1] / X.shape[0]\n\n train_loss = []\n train_RMSE = []\n test_loss = []\n test_RMSE = []\n\n # batch size indicator\n b = 0\n # cache for adagrad\n G = np.zeros(W.shape)\n\n for i in range(epoch):\n inds = []\n last_step = 0\n\n for j in np.random.permutation(X.shape[0]):\n inds.append(j)\n b += 1\n\n # do the adagrad to update the parameter\n if b >= batch:\n diff = X[inds].dot(W) - y[inds]\n\n # calculate gradients\n w = np.array(W)\n w[w > 0] = 1\n w[w < 0] = -1\n grad_X = X[inds].T.dot(diff)\n grad_regulariz = lamb * W * batch / X.shape[0]\n grad_first_order_reg = lamb1 * w * batch / X.shape[0]\n grad = grad_X + grad_regulariz + grad_first_order_reg\n\n # calculate update step\n G += grad**2\n delta_W = (grad + momentum * last_step) / np.sqrt(G)\n W -= lr * delta_W\n\n # reset variables\n last_step = delta_W\n b = 0\n inds = []\n\n objective = (((X.dot(W) - y)**2).sum() + lamb * (W**2).sum()) / 2.0\n RMSE = cal_RMSE(X, W, y)\n\n if X_test is not None and y_test is not None:\n # losses\n loss_X = ((X_test.dot(W) - y_test)**2).sum() / 2.0\n loss_reg = lamb * (W**2).sum() / 2.0\n loss_first_reg = lamb1 * (abs(W).sum())\n\n obj_t = loss_X + loss_reg + loss_first_reg\n RMSE_t = cal_RMSE(X_test, W, y_test)\n\n test_loss.append(obj_t)\n test_RMSE.append(RMSE_t)\n\n # print out the progress\n if i % print_every == 0:\n if X_test is not None and y_test is not None:\n print('\\tepoch: %d; obj: %.4f; RMSE: %.4f; RMSE_test: %.4f' %\n (i, objective, RMSE, RMSE_t))\n else:\n print('\\tepoch: %d; obj: %.4f; RMSE: %.4f' %\n (i, objective, RMSE))\n\n train_loss.append(objective)\n train_RMSE.append(RMSE)\n\n print('final obj: %.4f' % train_loss[-1])\n\n return W, train_loss, train_RMSE, test_loss, test_RMSE", "def fit_lr_model(df, X_train, y_train, X_test, y_test, mask_test):\n print(\"**** LINEAR REGRESSION ****\")\n lin_mod = 
sm.OLS(y_train, sm.add_constant(X_train))\n fit_lin = lin_mod.fit()\n print(fit_lin.summary())\n\n y_pred_test = fit_lin.predict(sm.add_constant(X_test))\n df_test = pd.concat([df[mask_test][['player','wkts','year1_wkts_pm']].reset_index(),\n pd.DataFrame(y_pred_test).reset_index()],axis=1,)\n df_test = df_test.drop('index',axis=1)\n df_test.columns = ['player','wkts','wkts_baseline','wkts_exp']\n\n df_by_player = df_test.groupby('player').sum()\n\n print('Explained Variance (LR model): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Explained Variance (Baseline): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print('Mean Squared Error (LR model): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Mean Squared Error (Baseline): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print(' ')", "def __init__(self):\n self.reg = DummyRegressor(strategy='mean')", "def get_sgd_solution(TRAINING_PHI, TEST_PHI, VAL_PHI, W_Now, TrainingData,\n TrainingTarget, TestData, ValData):\n # Gradient Descent Solution for Linear Regression\n La = 2\n # learningRate = 0.01\n L_Erms_Val, L_Erms_TR, L_Erms_Test, L_Accuracy_Test, W_Mat = [], [], [], [], []\n\n for i in range(0, 400):\n\n # print (f'---------Iteration: {i} M{M} LR {learningRate} L :{C_Lambda}--------------')\n Delta_E_D = -np.dot(\n (TrainingTarget[i] - np.dot(np.transpose(W_Now), TRAINING_PHI[i])),\n TRAINING_PHI[i])\n La_Delta_E_W = np.dot(La, W_Now)\n Delta_E = np.add(Delta_E_D, La_Delta_E_W)\n Delta_W = -np.dot(learningRate, Delta_E)\n W_T_Next = W_Now + Delta_W\n W_Now = W_T_Next\n\n #-----------------TrainingData Accuracy---------------------#\n TR_TEST_OUT = GetValTest(TRAINING_PHI, W_T_Next)\n Erms_TR = GetErms(TR_TEST_OUT, TrainingTarget)\n L_Erms_TR.append(float(Erms_TR.split(',')[1]))\n\n #-----------------ValidationData Accuracy---------------------#\n VAL_TEST_OUT = GetValTest(VAL_PHI, W_T_Next)\n Erms_Val = GetErms(VAL_TEST_OUT, ValDataAct)\n L_Erms_Val.append(float(Erms_Val.split(',')[1]))\n\n #-----------------TestingData Accuracy---------------------#\n TEST_OUT = GetValTest(TEST_PHI, W_T_Next)\n Erms_Test = GetErms(TEST_OUT, TestDataAct)\n L_Erms_Test.append(float(Erms_Test.split(',')[1]))\n L_Accuracy_Test.append(float(Erms_Test.split(',')[0]))\n\n return ([L_Erms_TR, L_Erms_Val, L_Erms_Test, L_Accuracy_Test])", "def __train_model(self):\n for i in range(self.file_index):\n logger.info(\"Training the ALS model dataset \" + str(i))\n self.als = ALS(maxIter=5, regParam=0.01, userCol=\"UserId\", itemCol=\"GameId\", ratingCol=\"Userscore\",\n coldStartStrategy=\"drop\")\n self.model[i] = self.als.fit(self.df[i])\n logger.info(\"ALS model built!\")", "def fit(self, x: np.array, y: np.array, epochs: int = 2) -> 'LinearRegressor':\n self.rows, self.columns = x.shape\n self.rows_target, self.columns_target = y.shape\n\n self.w0 = 1.0\n self.w1 = np.random.randn(1, self.columns)\n \n print(f'Initials >> \"w0\": {self.w0}, \"w1\": {self.w1}')\n \n for _ in range(epochs):\n preds = self.predict(x)\n preds = np.reshape(preds, (-1,1))\n \n self.loss.append(np.sum(np.square(y - preds) / self.rows))\n\n dw0 = -(2 / self.rows) * np.sum(y - preds)\n dw1 = -(2 / self.rows) * x.T.dot(y - preds)\n\n self.w0 -= self.learning_rate * dw0\n self.w1 -= self.learning_rate * dw1\n #print(f'dw0: {dw0}, dw1: {dw1}')\n #print(f'\"w0\": {self.w0}, \"w1\": {self.w1}')\n \n return self", "def train(self, 
iterations = 100):\n arguments = ()\n print(\"training...\")\n results = optimize.minimize(self.CostFunction,x0 = self.Thetas, args = arguments, options = {'disp':True, 'maxiter': iterations}, method = \"L-BFGS-B\", jac = True)\n self.Thetas = results['x']\n FinalCost, _ = self.CostFunction(self.Thetas)\n print(\"successfully trained the model\") \n print(\"Final Cost for this model is:\", FinalCost)", "def learn(self):\r\n \r\n # unpack\r\n X = self.Train.X\r\n Y = self.Train.Y\r\n DY = self.Train.DY\r\n \r\n NX ,ND = X.shape\r\n NDY,_ = DY.shape\r\n \r\n print 'Build Information Matricies ...'\r\n \r\n # functions\r\n ay0 = np.array([[1.]]*NX)\r\n ay1 = X\r\n ay2 = np.reshape( np.einsum('ij,ik->ijk',X,X) , [-1,ND*ND] )\r\n\r\n # reduce redundant basis variables\r\n i_doub = np.tri(ND,k=-1).T == 1\r\n ay2[:,i_doub.ravel()] = ay2[:,i_doub.ravel()] * 2. \r\n i_keep = np.tri(ND,k=0).T == 1\r\n ay2 = ay2[:,i_keep.ravel()]\r\n\r\n # basis matrix, functions\r\n Ay = np.hstack([ay0,ay1,ay2])\r\n \r\n # arrays for the least squares regression\r\n At = Ay\r\n Yt = Y\r\n \r\n # gradients\r\n if NDY:\r\n ad0 = np.array([[0.]]*NX*ND)\r\n \r\n ad1 = np.tile( np.eye(ND) , [NX,1] )\r\n \r\n ad2a = np.repeat( np.eye(ND)[:,None,:] , ND , 1 )\r\n ad2a = np.reshape( ad2a , [-1,ND*ND] ) \r\n ad2a = np.repeat( ad2a, NX, axis=0 ) * np.repeat( np.tile( X, [ND,1] ) , ND, axis=1 )\r\n \r\n ad2b = np.repeat( np.eye(ND)[:,:,None] , ND , 2 )\r\n ad2b = np.reshape( ad2b , [-1,ND*ND] ) \r\n ad2b = np.repeat( ad2b, NX, axis=0 ) * np.tile( np.tile( X, [ND,1] ) , [1,ND] )\r\n \r\n ad2 = ad2a + ad2b\r\n \r\n # reduce redundant bases\r\n ad2[:,i_doub.ravel()] = ad2[:,i_doub.ravel()] * 2.\r\n ad2 = ad2[:,i_keep.ravel()] \r\n \r\n Ad = np.hstack([ad0,ad1,ad2])\r\n \r\n # add to arrays for least squares regression\r\n At = np.vstack([At,Ad])\r\n Yt = np.vstack([Yt, np.ravel(DY.T)[:,None]])\r\n \r\n print 'Least Squares Solve ...'\r\n B = sp.linalg.lstsq(At,Yt)[0] \r\n \r\n # unpack data\r\n c = B[0,0]\r\n b = B[1:ND+1]\r\n \r\n A = np.zeros([ND,ND])\r\n A[i_keep] = B[ND+1:,0]\r\n A[i_keep.T] = A.T[i_keep.T]\r\n \r\n # problem forumulation\r\n A = A*2.\r\n \r\n # store results\r\n self.c = c\r\n self.b = b\r\n self.A = A\r\n \r\n print ''", "def make_RI(self, X, y):\n# import aux_functions_strat as aux\n from aux_gps import get_RI_reg_combinations\n import warnings\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n feature_dim = [x for x in X.dims if x != self.sample_dim][0]\n regressors_list = get_RI_reg_combinations(X.to_dataset\n (dim=feature_dim))\n res_dict = {}\n for i in range(len(regressors_list)):\n keys = ','.join([key for key in regressors_list[i].\n data_vars.keys()])\n print('Preforming ML-Analysis with regressors: ' + keys +\n ', median = ' + regressors_list[i].attrs['median'])\n keys = regressors_list[i].attrs['median']\n new_X = regressors_list[i].to_array(dim=feature_dim)\n# new_X = aux.xr_order(new_X)\n new_X = new_X.transpose(..., feature_dim)\n# self = run_model_with_shifted_plevels(self, new_X, y, Target, plevel=plevels, lms=lms)\n self = self.fit(new_X, y)\n # self.fit(new_X, y)\n res_dict[keys] = self.results_\n# elif mode == 'model_all':\n# params, res_dict[keys] = run_model_for_all(new_X, y, params)\n# elif mode == 'multi_model':\n# params, res_dict[keys] = run_multi_model(new_X, y, params)\n self.results_ = produce_RI(res_dict, feature_dim)\n self.X_ = X\n return", "def regression(df_tot, fasit_key, chosen, max_p):\n\n with np.errstate(divide='ignore'):\n # First regression\n first_model 
= sm.OLS(df_tot[fasit_key], df_tot[chosen])\n\n # Initializing loop\n results = first_model.fit()\n chosen_p = chosen.copy()\n ant_break = 0\n\n # Looping through until final model is chosen\n while max(results.pvalues) > max_p or len(results.pvalues) >= max_final_numb_kandidater:\n if len(results.pvalues) <= min_kandidater:\n ant_break = 1 # count\n break\n chosen_p.remove(results.pvalues.idxmax()) # updating the chosen list\n\n with np.errstate(divide='ignore'):\n results = sm.OLS(df_tot[fasit_key], df_tot[chosen_p]).fit() # regression\n\n return results, chosen_p, ant_break", "def least_squares_training(self, inputs, targets):\n self._rbf_forward(inputs)\n a = self.rbf_outputs.T @ self.rbf_outputs\n b = self.rbf_outputs.T @ targets\n self.slp_weights = np.linalg.solve(a, b)", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def fit_model(X_train, X_test, y_train, y_test, model):\n \n if model == 'LinearRegression':\n \n regressor=LinearRegression()\n regressor.fit(X_train,y_train)\n y_pred =regressor.predict(X_test)\n r2 = r2_score(y_test, y_pred)\n \n elif model == 'Lasso':\n \n lasso = Lasso()\n lasso.fit(X_train, y_train)\n lasso_pred = lasso.predict(X_test)\n r2 = r2_score(y_test, lasso_pred)\n\n elif model == 'Ridge':\n \n ridge = Ridge()\n ridge.fit(X_train, y_train)\n ridge_pred = ridge.predict(X_test)\n r2 = r2_score(y_test, ridge_pred)\n \n \n else:\n model = make_pipeline(PolynomialFeatures(2), LinearRegression())\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n r2= r2_score(y_test,y_pred)\n\n\n return r2", "def linear(self, verbose=0):\n\n # Output linear regression summary with coefficients and p-values\n # if desired\n if verbose != 0:\n model = sm.OLS(self.y_train, sm.add_constant(self.X_train)).fit()\n print(model.summary())\n\n linear_regressor = LinearRegression(fit_intercept=True, normalize=False,\n copy_X=True)\n linear_score = np.mean(cross_val_score(\n estimator=linear_regressor, X=self.X_train, y=self.y_train,\n cv=5, scoring=self.scorer))\n print('Linear score: ' + str(linear_score))\n return linear_regressor", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def linear_regression(features, values):\n clf = SGDRegressor(n_iter=100)\n clf.fit(features,values)\n print(clf.score(features,values))\n intercept = clf.intercept_ \n params = clf.coef_\n \n return intercept, params", "def sslim_train(A, B, l1_reg=0.001, l2_reg=0.0001):\n alpha = l1_reg + l2_reg\n l1_ratio = l1_reg / alpha\n\n model = SGDRegressor(\n penalty='elasticnet',\n fit_intercept=False,\n alpha=alpha,\n l1_ratio=l1_ratio\n )\n\n # Following cSLIM proposal on creating an M' matrix = [ M, FT]\n # * alpha is used to control relative importance of the side information\n #Balpha = np.sqrt(alpha) * B\n B = B[:, :-3]\n Balpha = B\n\n Mline = vstack((A, Balpha), format='lil')\n m, n = A.shape\n\n # Fit each column of W separately\n W = lil_matrix((n, n))\n\n columns = Mline.shape[1]\n\n for j in range(columns):\n if j % 50 == 0:\n print '-> %2.2f%%' % ((j / float(columns)) * 100)\n\n mlinej = Mline[:, 
j].copy()\n\n # We need to remove the column j before training\n Mline[:, j] = 0\n\n model.fit(Mline, mlinej.toarray().ravel())\n\n # We need to reinstate the matrix\n Mline[:, j] = mlinej\n\n w = model.coef_\n\n # Removing negative values because it makes no sense in our approach\n w[w < 0] = 0\n\n for el in w.nonzero()[0]:\n W[(el, j)] = w[el]\n\n return W", "def train_model(args, tr_sparse):\n tf.logging.info('Train Start: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n \n # generate model\n input_tensor, row_factor, col_factor, model = wals.wals_model(tr_sparse,\n args.latent_factors,\n args.regularization,\n args.unobs_weight,\n args.weights,\n args.wt_type,\n args.feature_wt_exp,\n args.feature_wt_factor)\n \n # factorize matrix\n session = wals.simple_train(model, input_tensor, args.num_iters)\n \n tf.logging.info('Train Finish: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n \n # evaluate output factor matrices\n output_row = row_factor.eval(session=session)\n output_col = col_factor.eval(session=session)\n \n # close the training session \n session.close()\n \n return output_row, output_col", "def LinearRegr(synth_sample, real_sample, label, n_cores=1):\n train_col = list(set(synth_sample.columns) - set([label]))\n \n X_test = real_sample[train_col]\n y_test = real_sample[label]\n \n X_train = synth_sample[train_col]\n y_train = synth_sample[label]\n \n model = LinearRegression(n_jobs=n_cores)\n y_pred = model.fit(X_train, y_train).predict(X_test)\n \n return np.sqrt(mean_squared_error(y_test, y_pred))", "def evaluate_regression(x, t, w, basis, degree):\n \t# TO DO:: Compute t_est and err \n #w_tranpose=w.T\n\n\n # My logic goes as follows:\n # Definition of test error is when you run the trained\n # model against a dataset that it hasn't been exposed to\n # this dataset is known as the testset \n\n # As such the basic algorithm goes as follows:\n # We do not need to recompute the weights but we need to recompute\n # phi for our test data\n\n # As such, we are interested in how well our trained weights\n # estimate against the test data so we matrix multiply our\n # weights against the phi from our test data\n # thus t_est = w_train.T*phi(x) since we want to know how well our\n # trained model estimates against the training data\n # but in implementation we do phi(x)*w_train\n # to match array dimensions \n\n\n #Compute design matrix from test data \n phi=design_matrix(x,basis,degree)\n phi_cross=np.linalg.pinv(phi)\n\n # Compute testing weights // just in case we require this variable\n #if(t is not None):\n #w_test=phi_cross.dot(t)\n #w_test=phi_cross.dot(t)\n\n # We want to be able to index into our target vector\n\n #t_est=phi.dot(w_test)\n #if (t is not None):\n # testing_estimate=phi.dot(w_test)\n #testing_estimate=phi.dot(w_test)\n\n # Estimate of our targets according to test data against learned \n # coefficients\n t_est=phi.dot(w)\n #print(\"t_est\",t_est)\n #t_est = None\n\n # We calculate the RMS error as follows\n # Take equation 3.12 of PRML and modify as follows\n # My logic:\n # The equation given in PRML gives the SSE (sum of squares error)\n # By definition the MSE (mean squared error) takes the SSE and divides \n # it by population size, we also preserve the 1/2 constant \n # throughout our calcuations \n # Afterwards we take our MSE and square root it.\n\n # Compute difference between target and estimate\n\n if(t is not None):\n \n diff=t-t_est\n # Square all observations\n diff_squared=np.power(diff,2)\n # Sum up all the observations in our 
vector\n sig_squared=diff_squared.sum()\n half_sig_squared=0.5*(sig_squared)\n # Calculate population size\n population_size=t.shape[0]\n rmse=np.sqrt(half_sig_squared/population_size)\n err=rmse\n else:\n err=None\n\n #diff=t-t_est\n\n\n # Square all observations \n #diff_squared=np.power(diff,2)\n\n # Sum up all the observations in our vector\n #sig_squared=diff_squared.sum()\n\n #half_sig_squared=0.5*(sig_squared)\n\n # Calculate population size\n #population_size=t.shape[0]\n\n #rmse=np.sqrt(half_sig_squared/population_size)\n #err = rmse\n #print(\"err inside function\",err)\n #err=rmse\n return (t_est, err)", "def fit_LuEd(self, wl, Ls, Lu, Ed, params, weights, verbose=True):\n\n\t\t\tdef min_funct(params):\n\t\t\t\tp = params.valuesdict() \n\t\t\t\n\t\t\t\tRrs_modelled, Rrs_refl, Lu_Ed_modelled = self.model(beta = p['beta'], alpha = p['alpha'], am = p['am'], rh = p['rh'], pressure = p['pressure'], C_chl = p['C_chl'], C_sm = p['C_sm'], C_mie = p['C_mie'], n_mie = p['n_mie'], C_y = p['C_y'], S_y = p['S_y'], T_w = p['T_w'], theta_sun = p['theta_sun'], theta_view = p['theta_view'], n_w = p['n_w'], rho_s = p['rho_s'], rho_dd = p['rho_dd'], rho_ds = p['rho_ds'], delta = p['delta'], wl = wl, a_w = self.spectra['a_w'].values, daw_dT = self.spectra['daw_dT'].values, astar_ph = self.spectra['astar_ph'].values, astar_y = self.spectra['astar_y'].values, Ls_Ed = Ls/Ed)\n\n\t\t\t\tRrs_obs = Lu/Ed - Rrs_refl\n\n\t\t\t\t# Least squares\n\t\t\t\tresid = np.sum((Lu_Ed_modelled - Lu/Ed)**2 * weights)\n\n\t\t\t\treturn resid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs\n\n\t\t\tstart_time = time.time()\n\n\t\t\treg = lm.minimize(lambda x: min_funct(x)[0], params=params, method='lbfgsb', options={'disp': verbose, 'gtol': 1e-16, 'eps': 1e-07, 'maxiter': 15000, 'ftol': 1e-16, 'maxls': 20, 'maxcor': 20}) \n\n\t\t\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\t\t\tresid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs = min_funct(reg.params)\n\t\t\treg.params.add('resid', resid, False, 0.0, 100, None)\n\n\t\t\treturn reg, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs", "def fit(self, X, y, max_iter=MAX_ITER):\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n X = self.normalize_data(X)\n X = self.add_bias(X)\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = Fista(self, self.lambda_1)\n w = lasso.fit(xk=W[:, t], A=X[t], b=y[t], ind=self.groups,\n max_iter=max_iter)\n W[:, t] = w\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def test_lr_scalers():\n # We include a cost other than SumOfParams so that data is actually\n # queried from the training set, and the expected number of updates\n # are applied.\n cost = SumOfCosts([SumOfParams(), (0., DummyCost())])\n\n scales = [.01, .02, .05, 1., 5.]\n shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]\n\n learning_rate = .001\n\n class ModelWithScalers(Model):\n def __init__(self):\n super(ModelWithScalers, self).__init__()\n self._params = [sharedX(np.zeros(shape)) for shape in shapes]\n self.input_space = VectorSpace(1)\n\n def __call__(self, X):\n # Implemented only so that DummyCost would work\n return X\n\n def get_lr_scalers(self):\n return dict(zip(self._params, scales))\n\n model = ModelWithScalers()\n\n dataset = ArangeDataset(1)\n\n sgd = SGD(cost=cost,\n learning_rate=learning_rate,\n learning_rule=Momentum(.0),\n batch_size=1)\n\n sgd.setup(model=model, 
dataset=dataset)\n\n manual = [param.get_value() for param in model.get_params()]\n manual = [param - learning_rate * scale for param, scale in\n zip(manual, scales)]\n\n sgd.train(dataset=dataset)\n\n assert all(np.allclose(manual_param, sgd_param.get_value())\n for manual_param, sgd_param\n in zip(manual, model.get_params()))\n\n manual = [param - learning_rate * scale\n for param, scale\n in zip(manual, scales)]\n\n sgd.train(dataset=dataset)\n\n assert all(np.allclose(manual_param, sgd_param.get_value())\n for manual_param, sgd_param\n in zip(manual, model.get_params()))", "def agent_regress(traj):\n\n #TODO: regress x- y- coordinate saparately according to he time points\n time = traj[:, 0].reshape(len(traj[:, 0]), 1)\n x_dir = traj[:, 1]\n x_dir = x_dir.reshape(len(x_dir), 1)\n k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)\n mod_x = GPy.models.GPRegression(time, x_dir, k)\n mod_x.optimize(messages=False)\n mod_x.optimize_restarts(num_restarts = 30)\n\n time = traj[:, 0].reshape(len(traj[:, 0]), 1)\n k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)\n y = traj[:, 2]\n y = y.reshape(len(y), 1)\n m_y = GPy.models.GPRegression(time, y, k)\n m_y.optimize(messages=False)\n m_y.optimize_restarts(num_restarts = 30)\n m_xy = [mod_x, m_y]\n\n return m_xy", "def get_untrained_linear_regressor(bool_var, ml_models, training_feature, test_feature):\n if bool_var:\n lreg_name = 'linear_regressor' + get_suffix_ml_model()\n linear_regressor = linear_model.LinearRegression()\n multi_linear_regressor = MultiOutputRegressor(linear_regressor)\n lreg_train_feat = list(training_feature)\n lreg_test_feat = list(test_feature)\n ml_models.append([lreg_name, multi_linear_regressor, lreg_train_feat,\n lreg_test_feat])", "def train_lr(x, y, lamb):\n \n # TODO: implement the function.\n # initialize parameters w and b\n w = tf.Variable([0.0])\n b = tf.Variable(0.0)\n\n # set an optimizer\n # please check the documentation of tf.keras.optimizers.SGD\n optim = tf.keras.optimizers.SGD(learning_rate = 0.001)\n\n # loop to optimize w and b \n for i in range(1000):\n\n with tf.GradientTape() as gt:\n gt.watch([w, b])\n y_hat = regression_func(x, w, b)\n loss = loss_func(y, y_hat)\n\n dw, db = gt.gradient(loss, [w,b])\n\n del gt\n\n optim.apply_gradients(zip([dw,db],[w,b]))\n\n\n return w, b", "def train(self, i):\n\t\tlearningRate = self.learningRateFn(i)\n\t\tdiscountFactor = self.discountFactorFn(i)\n\t\ttrainingTuples = []\n\t\twhile self.currentSAVRSAVIndex < len(self.SAVRSAV):\n\t\t\tcurrentSAVRSAV = self.SAVRSAV[self.currentSAVRSAVIndex]\n\t\t\tvalue = currentSAVRSAV['predictedValueOfAction'] + learningRate * (currentSAVRSAV['reward'] + discountFactor*currentSAVRSAV['predictedValueOfNewAction'] - currentSAVRSAV['predictedValueOfAction'])\n\t\t\ttrainingTuples.append((currentSAVRSAV['state'],currentSAVRSAV['action'],value))\n\t\t\tself.currentSAVRSAVIndex += 1\n\t\treturn self.neuralNet.train(trainingTuples)", "def create_model():\n model = Sequential()\n\n model.add(Dense(18, input_dim=9, kernel_initializer='normal', activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(9, kernel_initializer='normal', activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(1, kernel_initializer='normal'))\n\n learning_rate = 0.001\n momentum = 0.8\n sgd = SGD(lr=learning_rate, momentum=momentum, nesterov=False)\n model.compile(loss='mean_squared_error', optimizer=sgd)\n model.summary()\n return model", "def run_regression(train_embeds, train_labels, test_embeds, test_labels):\n 
np.random.seed(1)\n from sklearn.linear_model import SGDClassifier\n from sklearn.dummy import DummyClassifier\n from sklearn.metrics import f1_score\n dummy = DummyClassifier()\n dummy.fit(train_embeds, train_labels)\n log = SGDClassifier(loss=\"log\", n_jobs=10, tol=1e-3)\n log.fit(train_embeds, train_labels)\n print(\"F1 score:\", f1_score(test_labels, log.predict(test_embeds), average=\"micro\"))\n print(\"Random baseline f1 score:\", f1_score(test_labels, dummy.predict(test_embeds), average=\"micro\"))", "def naive_forecaster_model_with_regressor(data_longley):\n y_train, _, X_train, _ = data_longley\n model = NaiveForecaster()\n return model.fit(y_train, X_train)", "def apply_ols_to_subject(total_s, total_r, r_outliers = False, smooth = False):\n\t#for sub in range(total_s+1)[1:]:\n\t\t#for run in range(total_r+1)[1:]:\n\tfor sub in range(1,17):\n\t\tfor run in range(1,4):\n\t\t\tdata = get_image(run, sub).get_data()\n\t\t\tif r_outliers == True:\n\t\t\t\tdata = remove_outliers(data)\n\t\t\tif smooth == True:\n\t\t\t\tdata = smooth_data(data, 2)\n\t\t\tbehavdata = get_behav(run, sub)\n\t\t\tprint(\"run:\", run, \"sub:\", sub)\n\t\t\tdesign = build_design(data, behavdata)\n\t\t\tif sub == 1 and run == 1:\n\t\t\t\tgain_loss_betas_2d = regression_fit(data, design)[2:,:]\n\t\t\telse: \n\t\t\t\tbetas = regression_fit(data, design)[2:,:]\n\t\t\t\tgain_loss_betas_2d = np.concatenate((gain_loss_betas_2d, betas), axis=0)\n\t\n\treturn gain_loss_betas_2d", "def TrainTrial(ss):\n\n if ss.NeedsNewRun:\n ss.NewRun()\n\n ss.TrainEnv.Step()\n\n # Key to query counters FIRST because current state is in NEXT epoch\n # if epoch counter has changed\n epc = env.CounterCur(ss.TrainEnv, env.Epoch)\n chg = env.CounterChg(ss.TrainEnv, env.Epoch)\n\n if chg:\n ss.LogTrnEpc(ss.TrnEpcLog)\n if ss.ViewOn and ss.TrainUpdt.value > leabra.AlphaCycle:\n ss.UpdateView(True)\n if ss.TestInterval > 0 and epc%ss.TestInterval == 0: # note: epc is *next* so won't trigger first time\n ss.TestAll()\n if epc >= ss.MaxEpcs or (ss.NZeroStop > 0 and ss.NZero >= ss.NZeroStop):\n # done with training..\n ss.RunEnd()\n if ss.TrainEnv.Run.Incr(): # we are done!\n ss.StopNow = True\n return\n else:\n ss.NeedsNewRun = True\n return\n\n # note: type must be in place before apply inputs\n ss.Net.LayerByName(\"Output\").SetType(emer.Target)\n ss.ApplyInputs(ss.TrainEnv)\n ss.AlphaCyc(True) # train\n ss.TrialStats(True) # accumulate", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def train_full_model(X,y_train):\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n\n #train\n print(\"train model\")\n tic = time.time()\n model.fit(x_train,y_train)\n tac = time.time()\n print(\"elapsed time\", tac-tic)\n\n #save data\n save_data(model,scaler,x_train,y_train)", "def model(self,sample):\n\n lca = self.lca\n \n self.amount_tech = lca.tech_params['amount']\n self.amount_bio = lca.bio_params['amount']\n\n self.i_sample = 0\n self.replace_non_parameterized_exchanges(sample)\n self.replace_parameterized_exchanges(sample)\n\n lca.rebuild_technosphere_matrix(self.amount_tech)\n 
lca.rebuild_biosphere_matrix(self.amount_bio)\n\n score = (sum(lca.characterization_matrix)*lca.biosphere_matrix) * \\\n spsolve(lca.technosphere_matrix,lca.demand_array)\n\n np.append(self.scores, score)\n\n return score", "def get_linear_model(params):\n\n ss = StandardScaler()\n lr = ElasticNet(selection='random', random_state=42) # EN\n\n if params['pca']:\n pca = PCA(n_components=params['pca_comps'], whiten=True)\n lr_model = Pipeline(steps=(['scale', ss], ['pca', pca], ['model', lr])) # pipeline\n else:\n lr_model = Pipeline(steps=(['scale', ss], ['model', lr])) # pipeline\n\n lr_model_params = {\n 'model__alpha': loguniform(1e-1, 1e3),\n 'model__l1_ratio': uniform(0.1, .9)\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV\n linear_model = RandomizedSearchCV(lr_model, lr_model_params, n_iter=500, cv=5)\n\n return clone(linear_model)", "def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,\n 'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),\n 'reg_alpha': hp.loguniform('reg_alpha', np.log(0.005), np.log(5)) - 0.0001,\n 'reg_lambda': hp.loguniform('reg_lambda', np.log(1), np.log(5)),\n 'bagging_freq': hp.choice('bagging_freq', [0, 1]),\n 'num_leaves': scope.int(hp.uniform('num_leaves', 10, 128)),\n 'n_estimators': 1000,\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 12,\n 'metric': 'None',\n 'is_unbalance': 'true',\n # 'min_data_per_group': 1000,\n 'verbose': -1,\n 'random_seed': 42,\n \n }\n\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best = fmin(score_model, space, algo=tpe.suggest,\n # trials=trials,\n max_evals=hyperopt_niters)\n return best", "def train_model(self, data:List[np.ndarray]):\n d = np.vstack(data)\n np.random.shuffle(d)\n self.regressor.fit(\n X=self.input(d),\n y=self.output(d)\n )", "def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")", "def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")", "def experiment_linear_tradeoff_linf(_):\n adv_norm_type = 'linf'\n dual_norm_type = 'l1'\n # Min l1-norm solution found (norm=0.6876)\n attack_eps = 1/0.6876\n attack_step_dir = 'sign_grad'\n module_name = 'train'\n log_dir = 'runs_linear_tradeoff_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [32]\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 500),\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10),\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging 
to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n params = []\n\n # reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10]\n # Between 1e-3 and 1e-1 for d/n=10 the adv robustness drops\n reg_coeff += [3e-3, 5e-3, 3e-2, 5e-2, 3e-1, 5e-1]\n\n # Model hyper-parameters\n linear_noreg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', 'none'),\n ])\n linear_reg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', ['w_%s' % dual_norm_type]),\n ('reg_coeff', reg_coeff),\n ])\n\n # Explicit regularization with line search\n # njobs=3*6*20*4*2=2880\n explicit_reg = nameit('optim', [\n ('name', 'fista'),\n ('niters', 10000),\n ('bound_step', True),\n ('step_size', [1, 10, 100, 1000]),\n ])\n params += [OrderedDict(shared_params+linear_reg_model_params+explicit_reg)]\n\n # Adversarial training with line search\n for i in [1] + list(np.arange(0.1, 2, 0.2)): # [0.1, 0.3, 0.5, 0.7, 1, 1.3]:\n adv_train_params = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 10000),\n ('bound_step', True),\n ])\n adv_train_params += nameit('optim', nameit('adv_train', [\n ('enable', True),\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10), # niters, 1000\n ('pre_normalize', True),\n ('post_normalize', True),\n ('step_dir', attack_step_dir),\n ('eps_iter', float(attack_eps) * i),\n ('eps_tot', float(attack_eps) * i),\n ]))\n params += [OrderedDict(\n shared_params+linear_noreg_model_params+adv_train_params)]\n\n return params, log_dir, module_name, exclude", "def fit(self, X, y, X_validate, y_validate):\n \n iterate = 800\n \n self.SGD_theta_list = [0]*len(X[0])\n self.SGD_bias = 0\n\n SGD_cost_history = []\n SGD_validate_cost_history = []\n\n for i in range(iterate):\n if(i%100==0):\n print(i,\" iterations\")\n selection = random.randint(0, len(X)-1) #selecting one random row for SGD\n temp_X = []\n temp_X.append(X[selection])\n temp_y = []\n temp_y.append(y[selection])\n self.SGD_bias, self.SGD_theta_list = self.update_thetas(np.array(temp_X), np.array(temp_y), self.SGD_theta_list, self.SGD_bias,self.training_rate)\n SGD_cost = self.cost_function(X, y, self.SGD_theta_list, self.SGD_bias)\n SGD_cost_history.append(SGD_cost)\n SGD_validate_cost = self.cost_function(X_validate, y_validate,self.SGD_theta_list, self.SGD_bias)\n SGD_validate_cost_history.append(SGD_validate_cost)\n\n self.FINAL_SGD_TRAIN_LOSS.append(SGD_cost_history[-1])\n self.FINAL_SGD_VALIDATE_LOSS.append(SGD_validate_cost_history[-1])\n\n plt.plot(list(range(iterate)), SGD_cost_history)\n plt.plot(list(range(iterate)), SGD_validate_cost_history)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Loss SGD\")\n plt.show()\n \n \n self.BGD_theta_list = [0]*len(X[0])\n self.BGD_bias = 0\n\n BGD_cost_history = []\n BGD_validate_cost_history = []\n\n for i in range(iterate):\n if(i%100==0):\n print(i,\" iterations\")\n selection = random.randint(0, len(X)-1)\n \n self.BGD_bias, self.BGD_theta_list = self.update_thetas(X, y, self.BGD_theta_list, self.BGD_bias,self.training_rate)\n\n BGD_cost = self.cost_function(X, y, self.BGD_theta_list, self.BGD_bias)\n BGD_cost_history.append(BGD_cost)\n BGD_validate_cost = self.cost_function(X_validate, y_validate,self.BGD_theta_list, 
self.BGD_bias)\n BGD_validate_cost_history.append(BGD_validate_cost)\n\n self.FINAL_BGD_TRAIN_LOSS.append(BGD_cost_history[-1])\n self.FINAL_BGD_VALIDATE_LOSS.append(BGD_validate_cost_history[-1])\n\n plt.plot(list(range(iterate)), BGD_cost_history)\n plt.plot(list(range(iterate)), BGD_validate_cost_history)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Loss BGD\")\n plt.show()\n\n print(\"FINAL_SGD_TRAIN_LOSS\\n\",self.FINAL_SGD_TRAIN_LOSS)\n print(\"FINAL_SGD_VALIDATE_LOSS\\n\",self.FINAL_SGD_VALIDATE_LOSS)\n print(\"FINAL_BGD_TRAIN_LOSS\\n\",self.FINAL_BGD_TRAIN_LOSS)\n print(\"FINAL_BGD_VALIDATE_LOSS\\n\",self.FINAL_BGD_VALIDATE_LOSS)\n\n \n return self", "def demo_train(ts_struct_list, frc_model=None, fg_mdl=None, fs_mdl=None, verbose=False,\n return_model=False, rewrite=True):\n\n # Check arguments:\n if fg_mdl is None:\n fg_mdl = frc_class.IdentityGenerator(name=\"Identity generator\", on=False)\n\n if fs_mdl is None:\n fs_mdl = gnt_class.FeatureGeneration() # IdentityModel(name=\"Identity selector\")\n\n if frc_model is None:\n frc_model = frc_class.CustomModel(Lasso, name=\"Lasso\", alpha=0.01)\n\n model = frc_class.PipelineModel(gen_mdl=fg_mdl, sel_mdl=fs_mdl, frc_mdl=frc_model)\n results = []\n res_text = []\n\n for ts in ts_struct_list:\n data = regression_matrix.RegMatrix(ts, x_idx=TS_IDX, y_idx=TS_IDX)\n\n # Create regression matrix\n data.create_matrix(nsteps=N_STEPS, norm_flag=True) # this creates data.Y, data.X and some other fields\n\n # Split data for training and testing\n data.train_test_split(TRAIN_TEST_RATIO)\n\n # train the model. This returns trained pipeline and its steps\n model, frc, gen, sel = model.train_model(data.trainX, data.trainY)\n\n selection_res = \"\\n Feature selection results: problem status {}, selected {} from {} \\\\\\\\ \\n\".\\\n format(sel.status, len(sel.selected), sel.n_vars)\n\n frcY, _ = data.forecast(model) # returns forecasted matrix of the same shape as data.Y\n # frcY, idx_frc = data.forecast(model, idx_rows=data.idx_test) # this would return forecasts only for data.testY\n\n data.plot_frc(n_frc=5, n_hist=10, folder=SAVE_DIR) #this saves figures into SAVE_DIR\n\n train_mae = data.mae(idx_rows=data.idx_train, idx_original=data.original_index)\n train_mape = data.mape(idx_rows=data.idx_train, idx_original=data.original_index)\n\n test_mae = data.mae(idx_rows=data.idx_test, idx_original=data.original_index)\n test_mape = data.mape(idx_rows=data.idx_test, idx_original=data.original_index)\n\n index = [ts.data[i].name for i in TS_IDX]\n res1 = pd.DataFrame(train_mae, index=index, columns=[(\"MAE\", \"train\")])\n res2 = pd.DataFrame(train_mape, index=index, columns=[(\"MAPE\", \"train\")])\n res3 = pd.DataFrame(test_mae, index=index, columns=[(\"MAE\", \"test\")])\n res4 = pd.DataFrame(test_mape, index=index, columns=[(\"MAPE\", \"test\")])\n res = pd.concat([res1, res2, res3, res4], axis=1)\n\n configuration_str = \"\\n Time series {} forecasted with {} + '{}' feature generation model and \" \\\n \"'{}' feature selection model \\\\\\\\ \\n\".format(ts.name, frc.name, gen.name, sel.name)\n if verbose:\n print(configuration_str)\n print(selection_res)\n print(res)\n\n results.append(res)\n res_text.append(configuration_str)\n res_text.append(selection_res)\n\n saved_mdl_fname = model.save_model(file_name=FNAME_PREFIX, folder=SAVE_DIR) # saving in not an option yet\n # model = frc_class.PipelineModel().load_model(file_name=fname)\n\n # write results into a latex file\n my_plots.save_to_latex(results, df_names=res_text, folder=SAVE_DIR, 
rewrite=rewrite)\n print(\"Results saved to folder {}\".format(SAVE_DIR))\n\n if return_model:\n return model, saved_mdl_fname\n\n return saved_mdl_fname", "def fit(self):\n self.lr = LRHMC( self.X_train, self.X_test, self.y_train, self.y_test )\n self.lr.fit()", "def linear_regression(x, t, basis, reg_lambda=0, degree=0):\n\n # TO DO:: Complete the design_matrix function.\n # e.g. phi = design_matrix(x,basis, degree)\n \n phi=design_matrix(x,basis,degree)\n phi_cross=np.linalg.pinv(phi)\n #t_tranpose=t.T\n # TO DO:: Compute coefficients using phi matrix\n if(reg_lambda==0):\n w=phi_cross.dot(t)\n if(reg_lambda!=0):\n # print(\"Inside lambda if: \")\n n_col=phi.shape[1]\n #r=phi.T.dot(phi) + reg_lambda * np.identity(n_col)\n r=reg_lambda*np.identity(n_col)+phi.T.dot(phi)\n r=np.linalg.inv(r)\n\t#r=np.linalg.inv(r)\n [email protected]\n w=z@t\n #w = phi_cross.dot(t)\n\n # Measure root mean squared error on training data.\n # Basic algorithim goes as follows:\n # \t1. We take Equation 3.12 * 1/n \n # Then we math.sqrt( of the equation obtained in 1.)\n\n # t_est: variable for estimation of targets\n t_est= phi.dot(w)\n \n # variable to calculate the difference between our target and estimate\n # target is the left operand, estimate is right operand\n diff=t-t_est\n \n # Square all the elements\n diff_squared=np.power(diff,2)\n\n # Sum up all the elements of diff_squared, i.e take square of\n # all elements then sum them up\n\n sig_squared=diff_squared.sum()\n\n # multiply by 1/2 as specified in PRML\n\n half_sig_squared=0.5*(sig_squared)\n\n # Divide by population size and square root\n population_size= t.shape[0]\n\n rmse_bforesqrt=half_sig_squared/population_size\n\n train_err = np.sqrt(rmse_bforesqrt)\n\n return (w, train_err)", "def train(self):\n\n # Create random sample of size self.n\n inst_set = []\n while len(inst_set) < self.n:\n for inst in self.training_instances:\n if np.random.binomial(1, 0.5) == 1 and len(inst_set) < self.n:\n inst_set.append(inst)\n\n if len(inst_set) == self.n:\n break\n\n fvs, labels = TRIMLearner.get_fv_matrix_and_labels(inst_set)\n\n # Calculate initial theta\n w, b = self._minimize_loss(fvs, labels)\n\n old_loss = -1\n loss = 0\n while loss != old_loss:\n if self.verbose:\n print('Current loss:', loss)\n\n # Calculate minimal set\n loss_vector = fvs.dot(w) + b\n loss_vector -= labels\n loss_vector = list(map(lambda x: x ** 2, loss_vector))\n\n loss_tuples = []\n for i in range(len(loss_vector)):\n loss_tuples.append((loss_vector[i], inst_set[i]))\n loss_tuples.sort(key=lambda x: x[0]) # sort using only first elem\n\n inst_set = list(map(lambda tup: tup[1], loss_tuples[:self.n]))\n\n # Minimize loss\n fvs, labels = TRIMLearner.get_fv_matrix_and_labels(inst_set)\n w, b = self._minimize_loss(fvs, labels)\n\n old_loss = loss\n loss = self._calc_loss(fvs, labels, w, b)\n\n self.w = w\n self.b = b", "def __init__(self, log=True, normalize=False):\r\n self.model = LinearRegression(normalize=normalize)\r\n self.log = log", "def create_model_testing(lyrs, act, opt='Adam', dr=0.2):\n\n # set random seed for reproducibility\n seed(42)\n tf.random.set_seed(42)\n\n # create sequential model\n model = Sequential()\n\n # create first hidden layer\n model.add(Dense(lyrs[0], input_dim=X_train.shape[1], activation=act))\n\n # create additional hidden layers\n for i in range(1,len(lyrs)):\n model.add(Dense(lyrs[i], activation=act))\n\n # add dropout, default is none\n model.add(Dropout(dr))\n\n # create output layer\n model.add(Dense(1, activation=\"sigmoid\")) # output 
layer\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n return model", "def linearRegression(self, xtr, ytr, xte, yte):\n LEARNING_RATE = 0.5\n with self.graph.as_default() as graph:\n with tf.name_scope('training'):\n with tf.name_scope('loss'):\n train_loss = tf.reduce_mean(\n tf.square(self.y_train - self.y_placeholder))\n with tf.name_scope('optimizer'):\n optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)\n train = optimizer.minimize(train_loss) \n # test loss may be different\n with tf.name_scope('test'):\n with tf.name_scope('loss'):\n test_loss = tf.reduce_mean(\n tf.square(self.y_test - self.y_placeholder))\n with tf.Session() as sess:\n # Save the variables to disk.\n model_dir = \"./ckpt/\"\n builder = tf.saved_model.builder.SavedModelBuilder(model_dir)\n\n # Initialize variables\n sess.run(tf.global_variables_initializer())\n TRAIN_STEPS = 201\n\n for step in range(TRAIN_STEPS):\n sess.run([train], \n feed_dict={self.x_placeholder: xtr, \n self.y_placeholder: ytr})\n if step % 20 == 0:\n test_loss_val = sess.run([test_loss],\n feed_dict={self.x_placeholder: xte, \n self.y_placeholder: yte})\n print('step {}, test loss is {}'.format(\n step, test_loss_val))\n\n # Final training results\n a = sess.run(self.a)\n b = sess.run(self.b)\n # Draw result\n minx=np.min(np.concatenate((xtr,xte)))\n maxx=np.max(np.concatenate((xtr,xte)))\n xref=np.linspace(minx,maxx,100)\n plt.figure(0)\n plt.plot(xref, a*xref+b, 'r.')\n plt.plot(xtr, ytr, 'b.')\n plt.plot(xte, yte, 'g.')\n plt.show()", "def mlr(df, exp_vars, resp_var, \n method='ols', \n fit_intercept=True,\n kcv=3,\n normalize=False):\n from sklearn import cross_validation\n from sklearn.linear_model import LinearRegression, RidgeCV\n from sklearn.linear_model import LassoCV, ElasticNetCV\n from sklearn.metrics import r2_score\n from sklearn.utils import resample\n import matplotlib.pyplot as plt\n import seaborn as sn\n import pandas as pd\n import numpy as np\n \n # Separate data\n X = df[exp_vars]\n y = df[resp_var]\n \n # Setup model\n if method == 'ols':\n model = LinearRegression(fit_intercept=fit_intercept, \n normalize=normalize)\n elif method == 'lasso':\n model = LassoCV(fit_intercept=fit_intercept, \n normalize=normalize, \n max_iter=10000,\n cv=kcv)\n elif method == 'ridge':\n model = RidgeCV(fit_intercept=fit_intercept, \n normalize=normalize, \n alphas=np.logspace(-10, 10, 21))\n elif method == 'el-net':\n model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],\n fit_intercept=fit_intercept, \n normalize=normalize,\n cv=kcv)\n else:\n raise ValueError('\"method\" parameter must be in [\"ols\", \"lasso\", \"ridge\", \"el-net\"]')\n \n # k-fold cross validation\n #cv_scores = cross_validation.cross_val_score(model, X, y, cv=kcv, scoring='r2')\n #print 'Mean r2 from %s-fold CV: %.3f\\n' % (kcv, cv_scores.mean())\n \n # Train model on full dataset\n model.fit(X, y)\n \n # Get y-hat\n y_pred = model.predict(X)\n \n # r2 based on calibration data\n r2 = r2_score(y, y_pred)\n print 'r2:', r2\n print ''\n \n # Summary of model\n print model\n print ''\n \n if method == 'lasso':\n print 'Lasso alpha:', model.alpha_\n print ''\n elif method == 'ridge':\n print 'Ridge alpha:', model.alpha_\n print ''\n elif method == 'el-net':\n print 'Elastic net alpha:', model.alpha_ \n print 'Elastic net L1 ratio:', model.l1_ratio_ \n print ''\n else: # OLS\n pass\n \n # Plot\n fig = plt.figure(figsize=(15,15))\n \n # Paired points for each site\n ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)\n 
ax1.plot(range(0, len(X.index)), y, 'ro', label='Observed')\n ax1.plot(range(0, len(X.index)), y_pred, 'b^', label='Modelled')\n \n ax1.set_xticks(range(0, len(X.index)))\n ax1.set_xticklabels(X.index, rotation=90, fontsize=12)\n ax1.set_xlim(0, len(X.index)-1)\n \n ax1.set_xlabel('Site code', fontsize=16)\n ax1.set_ylabel(resp_var)\n ax1.set_title('Points paired for each location', fontsize=20)\n ax1.legend(loc='best', fontsize=16)\n \n # Modelled versus observed\n ax2 = plt.subplot2grid((2,2), (1,0), colspan=1)\n ax2.plot(y, y_pred, 'ro')\n ax2.set_xlabel('Observed', fontsize=16)\n ax2.set_ylabel('Modelled', fontsize=16)\n ax2.set_title('Modelled versus observed', fontsize=20)\n \n # Hist of residuals\n ax3 = plt.subplot2grid((2,2), (1,1), colspan=1)\n sn.distplot(y - y_pred, kde=True, ax=ax3)\n ax3.set_title('Histogram of residuals', fontsize=20)\n \n plt.tight_layout()\n \n # Get param estimates\n params = pd.Series(model.coef_, index=X.columns)\n\n # Estimate confidence using bootstrap\n # i.e. what is the std. dev. of the estimates for each parameter\n # based on 1000 resamplings\n err = np.std([model.fit(*resample(X, y)).coef_ for i in range(1000)], \n axis=0)\n\n # Build df\n res = pd.DataFrame({'effect':params,\n 'error':2*err})\n\n # Rough indicator of significance: are the estimated values more than\n # 2 std. devs. from 0 (~95% CI?). NB: this assumnes the \"marginal posterior\" \n # is normal, which I haven't tested for and which quite possibly isn't true\n # - use with care! \n res['signif'] = np.abs(res['effect']) > res['error']\n \n return res", "def __init__(self, base_model='LogisticRegression', number_model=50, \n hidden_layer_sizes=(100,), activation='relu',\n kernel='poly', degree=3, gamma='auto',\n criterion='gini', reg_penalty='l2', reg=0.001, random_state=0):\n self.number_model = number_model\n r = random_state\n # Initialise all_model list\n self.all_model = []\n for i in range(number_model):\n if base_model=='Perceptron':\n curr_model = Perceptron(reg_penalty=reg_penalty, reg=reg,\n random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='MLPerceptron':\n curr_model = MLPerceptron(hidden_layer_sizes=hidden_layer_sizes,\n activation=activation, reg=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='LogisticRegression':\n curr_model = LogisticRegression(reg_penalty=reg_penalty,\n reg_inv=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='ModelSVM':\n curr_model = ModelSVM(kernel=kernel, degree=degree,\n gamma=gamma, reg=reg, random_state=i+r*100)\n self.all_model.append(curr_model.model)\n elif base_model=='ModelDecisionTree':\n curr_model = ModelDecisionTree(criterion=criterion, random_state=i+r*100)\n self.all_model.append(curr_model.model)", "def _build_regression(endog, exog, model, lasso_positive, alpha):\n if model=='Ridge':\n mod = Ridge(alpha=alpha)\n elif model=='Lasso':\n mod = Lasso(alpha=alpha, positive=lasso_positive)\n else:\n raise ValueError(\"Model must be of type Ridge or Lasso\")\n \n mod.fit(endog, exog)\n return mod", "def singlevar_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = keras.models.Sequential()\n model.add(Dense(1, input_dim=1, kernel_initializer=init, activation='relu'))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def do_stats_model(x, y):\n Xx = sm.add_constant(x)\n sm_logit 
= sm.Logit(y, Xx)\n result = sm_logit.fit()\n print result.summary()\n result.pred_table()\n # linear model\n print \"linear regression model:\\n\"\n sm_linear = sm.OLS(y, Xx)\n result = sm_linear.fit()\n print result.summary()", "def add_X_NNLO_all_fit(axe, xran, values, errors, fill=False, save=False):\n B = values[\"B\"]\n F_0 = values[\"F_0\"]\n LAMBDA4 = values[\"Lambda4\"]\n LAMBDA3 = values[\"Lambda3\"]\n # LAMBDA12 = values[\"Lambda12\"]\n km = values[\"km\"]\n kf = values[\"kf\"]\n\n x = np.linspace(xran[0], xran[1], num=500)\n\n Msqr = x * (8 * (np.pi**2) * (F_0**2))\n arg4 = LAMBDA4**2 / Msqr\n arg3 = LAMBDA3**2 / Msqr\n # arg12 = LAMBDA12**2 / Msqr\n\n l1 = -0.4\n l2 = 4.3\n\n Lambda1sqr = (phys_pion**2) * np.exp(l1)\n Lambda2sqr = (phys_pion**2) * np.exp(l2)\n\n lnLambda12sqr = (7.0 * np.log(Lambda1sqr) + 8.0 * np.log(Lambda2sqr)) / 15.0\n lambda12sqr = np.exp(lnLambda12sqr)\n\n arg12 = lambda12sqr / Msqr\n\n lm = 1.0 / 51.0 * (60.0 * np.log(arg12) - 9.0 * np.log(arg3) + 49.0)\n lf = 1.0 / 30.0 * (30.0 * np.log(arg12) + 6.0 * np.log(arg3) - 6.0 * np.log(arg4) + 23.0)\n\n y = F_0 * (1.0 + x * np.log(arg4) - 5.0 / 4.0 * (x**2) * (lf)**2 + kf * x**2)\n\n plots = []\n paramstring = \" \".join(\"${}={}$\".format(format_parameters(k), print_paren_error(float(v), float(errors[k])))\n for k, v in sorted(values.iteritems()))\n paramstring = \"$ M_\\pi<{}$\".format(values[\" M_\\pi<\"])\n plabel = \"NNLO Mss=0 fit: {}\".format(paramstring)\n plabel = \"NNLO $a\\\\to 0$ $\\Delta Mss=0$ \"\n plabel = \"NNLO\"\n\n if \"cutoff\" in values:\n plabel += \" $M_\\pi < {}$\".format(values[\"cutoff\"])\n addplot(plots, axe, fill, save, x=x, y=y, params={\"label\":plabel, \"ls\":\"--\", \"lw\":4})\n\n return plots", "def sklearn_model(train_data):\n X, y = train_data\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model = LogisticRegression(\n multi_class=\"multinomial\", solver=\"lbfgs\", max_iter=1000\n )\n model.fit(X, y)\n return model", "def fit_model():\n global _HOME_OWNERSHIP\n _HOME_OWNERSHIP = {x: i for i, x in enumerate([\"rent\", \"own\", \"mortgage\", \"other\"])}\n df = pd.read_csv(os.path.join(settings.BASE_DIR, \"LoanStats3a.csv\"), skiprows=1).head(5000)\n df = df[df.apply(is_poor_coverage, axis=1)]\n df['year_issued'] = df.issue_d.apply(lambda x: int(x.split(\"-\")[0]))\n df_term = df[df.year_issued < 2012]\n\n bad_indicators = [\n \"Late (16-30 days)\",\n \"Late (31-120 days)\",\n \"Default\",\n \"Charged Off\"\n ]\n df_term['is_rent'] = df_term.home_ownership == \"RENT\"\n df_term = df_term[df_term.home_ownership.apply(lambda x: x is not None and x != 'NONE')]\n df_term['is_bad'] = df_term.loan_status.apply(lambda x: x in bad_indicators)\n df_term['term'] = df_term.term.apply(lambda x: x.split()[0])\n df_term['home_ownership'] = df_term.home_ownership.apply(lambda x: _HOME_OWNERSHIP[x.lower()])\n global _LENDING_PREDICT_MODEL\n _LENDING_PREDICT_MODEL = LogisticRegression()\n _LENDING_PREDICT_MODEL.fit(df_term[_FEATURES], df_term.is_bad)", "def evaluate(self, train_set, test_set, shuffle_batch=True,\n epochs=25, lr_decay=0.95, sqr_norm_lim=9,labels=None,model=None): \n cost = self.negative_log_likelihood(self.y) \n dropout_cost = self.dropout_negative_log_likelihood(self.y)\n # adadelta upgrades: dict of variable:delta\n grad_updates = self.sgd_updates_adadelta(dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n # shuffle dataset and assign to mini batches.\n # if dataset size is not a multiple of batch size, replicate \n # extra data (at random)\n 
np.random.seed(3435)\n batch_size = self.batch_size\n if train_set.shape[0] % batch_size > 0:\n extra_data_num = batch_size - train_set.shape[0] % batch_size\n #extra_data = train_set[np.random.choice(train_set.shape[0], extra_data_num)]\n perm_set = np.random.permutation(train_set) \n extra_data = perm_set[:extra_data_num]\n new_data = np.append(train_set, extra_data, axis=0)\n else:\n new_data = train_set\n \n shuffled_data = np.random.permutation(new_data) # Attardi\n n_batches = shuffled_data.shape[0]/batch_size\n # divide train set into 90% train, 10% validation sets\n n_train_batches = int(np.round(n_batches*0.8))\n n_val_batches = n_batches - n_train_batches\n train_set = shuffled_data[:n_train_batches*batch_size,:]\n val_set = shuffled_data[n_train_batches*batch_size:,:] \n # push data to gpu \n # the dataset has the format [word_indices,padding,user,label]\n train_set_x, train_set_y = shared_dataset(train_set[:,:-2], train_set[:,-1]) \n train_set_u = theano.shared(np.asarray(train_set[:,-2],dtype='int32')) \n # val_set_x = val_set[:,:-2]\n # val_set_u = val_set[:,-2]\n # val_set_y = val_set[:,-1]\n val_set_x, val_set_y = shared_dataset(val_set[:,:-2], val_set[:,-1])\n val_set_u = theano.shared(np.asarray(val_set[:,-2],dtype='int32')) \n test_set_x = test_set[:,:-2]\n test_set_u = test_set[:,-2]\n test_set_y = test_set[:,-1] \n batch_start = self.index * batch_size\n batch_end = batch_start + batch_size\n\n # compile Theano functions to get train/val/test errors\n \n \n test_y_pred = self.predict(test_set_x,test_set_u)\n test_error = T.mean(T.neq(test_y_pred, self.y))\n # errors on train set\n if self.Users is not None:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]\n },\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n self.y: val_set_y[batch_start:batch_end], \n self.u: val_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.u, self.y], test_error, allow_input_downcast=True)\n else:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n self.y: val_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.y], test_error, allow_input_downcast=True)\n\n # start training over mini-batches\n print 'training...' 
\n best_val_perf = 0\n test_perf = 0 \n patience = 5\n drops = 0\n prev_val_perf = 0 \n for epoch in xrange(epochs):\n start_time = time.time()\n # FIXME: should permute whole set rather than minibatch indexes\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n self.set_zero(self.zero_vec) # CHECKME: Why?\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n self.set_zero(self.zero_vec)\n train_losses = [train_error(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_perf = 1 - np.mean(val_losses) \n info = 'epoch: %i\\%i (%.2f secs) train acc: %.2f %% | val acc: %.2f %%' % (\n epoch,epochs, time.time()-start_time, train_perf * 100., val_perf*100.) \n # from ipdb import set_trace; set_trace()\n if val_perf > prev_val_perf: \n drops=0\n if val_perf >= best_val_perf:\n best_val_perf = val_perf\n info+= \" **\"\n if model:\n # print \"save model\"\n self.save(model)\n if self.Users is not None:\n test_loss = test_model(test_set_x, test_set_u, test_set_y)\n else:\n test_loss = test_model(test_set_x, test_set_y)\n test_perf = 1 - test_loss \n else: \n drops+=1\n if drops >= patience:\n print \"Ran out of patience...\"\n break\n prev_val_perf = val_perf\n print info\n # set_trace() \n return test_perf", "def fit(self, X, y):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n cost_function = 0\n start = time.time()\n for t in range(n_tasks):\n #print('Training task {} with group lasso'.format(t))\n fista = Fista(self, self.lambda_1)\n w_opt = fista.fit(W[:, t], X[t], y[t], self.groups,\n max_iter=self.max_iter)\n W[:, t] = w_opt\n cost_function += self.cost(X[t], y[t], W[:, t])\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def build_baseline_model(num_of_input):\n \n # create model\n model = Sequential()\n model.add(Dense(2, input_dim=num_of_input, activation='relu'))\n model.add(Dense(1, activation='linear'))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer='adam')\n return model", "def run_SGD(file_path):\n\n df_train = pd.read_csv(f'{file_path}/without_anom.csv')\n features_list = ['Direction', 'Speed']\n df_train = df_train[features_list]\n\n scalar = MaxAbsScaler()\n\n X_train = scalar.fit_transform(df_train)\n\n linear_model = SGDRegressor()\n multi_model = MultiOutputRegressor(SGDRegressor())\n\n multi_model.fit(X_train, X_train)\n\n multi_model_predict = multi_model.predict(X_train)\n\n print(multi_model_predict)", "def create_model(X_train, lyrs=[16], act=\"relu\", opt='Adam', dr=0.2):\n\n # set random seed for reproducibility\n seed(42)\n tf.random.set_seed(42)\n\n model = Sequential()\n\n # create first hidden layer\n model.add(Dense(lyrs[0], input_dim=X_train.shape[1], activation=act))\n\n # create additional hidden layers\n for i in range(1, len(lyrs)):\n model.add(Dense(lyrs[i], activation=act))\n\n # dropout\n model.add(Dropout(dr))\n\n # create output layer\n model.add(Dense(1, activation=\"sigmoid\")) # output layer\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n return model", "def fit_all_synthesis(t, l, bounds=(0, np.inf), alpha_0=1, beta_0=1, gamma_0=1):\n l = l.A if issparse(l) else l\n\n tau = np.hstack((0, t))\n x = np.hstack((0, l))\n\n f_lsq = lambda p: 
sol_u(tau, 0, p[0], p[1]) + sol_s(tau, 0, 0, p[0], p[1], p[2]) - x\n ret = least_squares(f_lsq, np.array([alpha_0, beta_0, gamma_0]), bounds=bounds)\n return ret.x[0], ret.x[1], ret.x[2]", "def demo_linear_regression(\n n_in = 100,\n n_out = 4,\n n_training_samples = 500,\n n_test_samples = 500,\n noise = .1,\n n_epochs = 10,\n eta = 0.001,\n random_seed = 1234,\n score_report_period = 100,\n ):\n\n # Setup data\n rng = np.random.RandomState(random_seed)\n w_true = rng.randn(n_in, n_out)*.1 # (n_in, n_out)\n training_data = rng.randn(n_training_samples, n_in) # (n_training_samples, n_in)\n training_target = training_data.dot(w_true) + noise*rng.randn(n_training_samples, n_out) # (n_training_samples, n_out)\n test_data = rng.randn(n_test_samples, n_in) # (n_test_samples, n_in)\n test_target = test_data.dot(w_true) + noise*rng.randn(n_test_samples, n_out) # (n_test_samples, n_out)\n predictor = OnlineLinearRegressor(n_in=n_in, n_out=n_out, learning_rate=eta)\n\n # Train and periodically record scores.\n epoch_scores = []\n for i in xrange(n_training_samples*n_epochs+1):\n if i % score_report_period == 0:\n training_out = predictor.predict(training_data)\n training_cost = ((training_target-training_out)**2).sum(axis=1).mean(axis=0)\n test_out = predictor.predict(test_data)\n test_cost = ((test_target-test_out)**2).sum(axis=1).mean(axis=0)\n print('Epoch {epoch}: Test Cost: {test}, Training Cost: {train}'.format(epoch=float(i)/n_training_samples, test=test_cost, train=training_cost))\n epoch = float(i) / n_training_samples\n epoch_scores.append((epoch, training_cost, test_cost))\n predictor.train(training_data[[i % n_training_samples]], training_target[[i % n_training_samples]])\n\n # Plot\n epochs, training_costs, test_costs = zip(*epoch_scores)\n plt.plot(epochs, np.array([training_costs, test_costs]).T)\n plt.xlabel('epoch')\n plt.ylabel('cost')\n plt.legend(['Training Cost', 'Test Cost'])\n plt.title(\"Learning Curve\")\n plt.ion()\n plt.show()\n\n return {'training_cost': training_cost, 'test_cost': test_cost}", "def all_param_AN(ds, myloss='mean_squared_error'):\n wr = ds[0]\n wl = ds[1]\n V = ds[2]\n omega = ds[3]\n input = np.zeros((len(wl),2))\n input[:,0] = wr\n input[:,1] = wl\n output = np.zeros((len(wl),2))\n output[:,0] = V\n output[:,1] = omega\n input_layer = keras.layers.Input((2,),name=\"input\") #wr et wl\n hidden_layer = keras.layers.Dense(2, activation='linear', kernel_initializer='uniform',\n input_shape=(2,), use_bias=False, name=\"output\") #V et omega\n output_layer = hidden_layer(input_layer)\n ann = keras.models.Model(inputs=input_layer, outputs=output_layer)\n opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n ann.compile(loss=myloss, optimizer=opt)\n ann_in, ann_out = input, output\n history = ann.fit(ann_in, ann_out, epochs=40, batch_size=64, verbose=0,\n shuffle=True, validation_split=0.1)#, callbacks=callbacks)\n\n \"\"\"plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\"\"\"\n\n weights = hidden_layer.get_weights()[0]\n Rr_est = weights[0][0]*2\n Rl_est = weights[1][0]*2\n L_est1 = 1/(weights[0][1]/Rr_est)\n L_est2 = -1/(weights[1][1]/Rr_est)\n return Rr_est, Rl_est, (L_est2+L_est1)/2 #moyenne des deux longueurs obtenues", "def _fit(self, _X, _y):\n\n self.model = linear_model.LogisticRegression(penalty=self.penalty, random_state=self.seed,\n 
solver='saga', n_jobs=self.n_jobs)\n self.model.fit(_X, _y)", "def main():\n ###################\n ## Dataset ##\n ###################\n # (X, y) m = 100, n = 1\n _data_x, data_y = make_regression(n_samples=100, n_features=1, noise=10)\n\n # show the dataset\n plt.subplot(2, 2, 1)\n plt.title(\"dataset\")\n plt.scatter(_data_x, data_y)\n\n # Transform the dataset into matrices.\n # That is used for writing the equations in the matrix form.\n data_x = np.hstack((_data_x, np.ones(_data_x.shape)))\n data_y = data_y.reshape(data_y.shape[0], 1)\n\n #################\n ## Model ##\n #################\n # initial parameters\n init_params = np.random.randn(2, 1)\n\n # initial model\n init_model = model(data_x, init_params)\n\n # plot initial model\n plt.subplot(2, 2, 2)\n plt.title(\"initial model\")\n plt.scatter(_data_x, data_y)\n plt.plot(_data_x, init_model, c='g')\n\n #########################\n ## cost function ##\n #########################\n # show cost function for initial parameters\n print(cost_function(data_x, data_y, init_params))\n\n ####################\n ## training ##\n ####################\n # learning rate\n learn_rate = 0.005\n # number of iterations\n number_iterations = 1_000\n\n # final parameters for our model\n final_params, cost_tracking = gradient_descent(\n data_x, data_y, init_params, learn_rate, number_iterations)\n\n # final model\n final_model = model(data_x, final_params)\n\n # show cost function for final parameters\n print(cost_function(data_x, data_y, final_params))\n\n # plot final model\n plt.subplot(2, 2, 3)\n plt.title(\"final model\")\n plt.scatter(_data_x, data_y)\n plt.plot(_data_x, final_model, c='r')\n\n ##########################\n ## learning curve ##\n ##########################\n # plot Cost history\n plt.subplot(2, 2, 4)\n plt.title(\"cost tracking\")\n plt.plot(range(number_iterations), cost_tracking)\n\n ########################################\n ## Coefficient of determination ##\n ########################################\n print(coefficient_determination(data_y, final_model))\n\n plt.show()", "def baseline_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = keras.models.Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n model.add(Dense(12, input_dim=12, kernel_initializer=init, activation='relu'))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def eval_all(folder):\n optimizers = [\n tf.keras.optimizers.Adadelta(learning_rate=0.01),\n tf.keras.optimizers.Adagrad(learning_rate=0.002),\n tf.keras.optimizers.Adam(learning_rate=0.0001),\n tf.keras.optimizers.Adamax(learning_rate=0.0005),\n tf.keras.optimizers.Ftrl(learning_rate=0.002),\n tf.keras.optimizers.Nadam(learning_rate=0.001),\n tf.keras.optimizers.RMSprop(learning_rate=0.0005),\n tf.keras.optimizers.SGD(learning_rate=0.003),\n ]\n\n epochs = [\n 500, 120, 80, 150, 300, 60, 100, 500\n ]\n\n biased_randomized = [\n (models.DefaultModel, False),\n (models.BiasedModel, False),\n (models.NeuralModel, False),\n (models.DefaultModel, True),\n (models.BiasedModel, True),\n (models.NeuralModel, True),\n ]\n\n for optimizer, n_epochs in zip(optimizers, epochs):\n for model, rndmz in biased_randomized:\n eval_optimizer(folder,\n model,\n optimizer,\n n_epochs,\n rndmz)", "def test_lcsmodel_class():\n\n # Set the problem size.\n n = 1000\n p = 3\n\n # Define the test model\n TM = test.Model2(n,p)\n\n # Note: diff_A/diff_b do not require 
A/b as an input in this case,\n # but in the more general case they might.\n\n # Check the basic model calculations.\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A,B = TM.eval_A_and_b(theta)\n\n dA_1, dB_1 = TM.diff_A_and_b(A, B, theta, 0)\n dA_2, dB_2 = TM.diff_A_and_b(A, B, theta, 1)\n dA_3, dB_3 = TM.diff_A_and_b(A, B, theta, 2)\n dA_4, dB_4 = TM.diff_A_and_b(A, B, theta, 3)\n Z = numpy.zeros_like(dA_1.todense())\n z = numpy.zeros_like(dB_1)\n \n print \"dA/dtheta_1 check:\", numpy.allclose(dA_1.todense(), TM.A1.todense())\n print \"dA/dtheta_2 check:\", numpy.allclose(dA_2.todense(), TM.A2.todense())\n print \"dA/dtheta_3 check:\", numpy.allclose(dA_3.todense(), Z)\n print \"dA/dtheta_4 check:\", numpy.allclose(dA_4.todense(), Z)\n\n print \"db/dtheta_1 check:\", numpy.allclose(dB_1, z)\n print \"db/dtheta_2 check:\", numpy.allclose(dB_2, z)\n print \"db/dtheta_3 check:\", numpy.allclose(dB_3, TM.B1)\n print \"db/dtheta_4 check:\", numpy.allclose(dB_4, TM.B2)\n\n\n #\n # Test the lcs model class\n #\n\n gLCS = LCSModel()\n gLCS.eval_A_and_b = TM.eval_A_and_b\n gLCS.diff_A_and_b = TM.diff_A_and_b\n \n gLCS.quiet=True\n\n x = gLCS.eval(theta)\n #print x.shape\n\n for k in range(p):\n print \"Primal solution for x_{}, matches spsolve calculation: {}\".\\\n format(k, numpy.allclose(x[:,k], spla.spsolve(A,B[:,k])))\n\n\n D = gLCS.jacobian(theta)\n\n # -- If theta[1]=0, and theta[2:3] are fixed, then there is an analytical\n # calculation for x(theta[0]), and in this case we can check the first\n # column of D.\n\n theta = numpy.array((5.1, 0, 1.2, 2.1))\n A, B = TM.eval_A_and_b(theta)\n D = gLCS.jacobian(theta)\n\n for k in range(p):\n D_col_1 = -(1./theta[0]**2) * B[:,k]\n print \"First column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,0], D_col_1))\n\n\n # -- We'll use a numerical approximation to check the second column of D\n\n h = 0.000001\n theta = numpy.array((5.1, 1.1, 1.2, 2.1))\n dtheta = numpy.array((0., h, 0., 0.))\n A,B = TM.eval_A_and_b(theta)\n x = gLCS.eval(theta)\n D = gLCS.jacobian(theta)\n\n A_dt, B_dt = TM.eval_A_and_b(theta + dtheta)\n\n for k in range(p):\n x_dt = spla.spsolve(A_dt, B_dt[:,k])\n D_col_2_num_approx = (x_dt - x[:,k])/h\n max_abs_err = numpy.max(numpy.abs(D[k,:,1] - D_col_2_num_approx))\n\n print \"Second column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,1], D_col_2_num_approx))\n \n print \"Max abs error in second column of D_{}: {}\".\\\n format(k, max_abs_err)\n \n\n # -- If theta[0] and theta[1] are fixed, A(theta) is determined, and A^{-1}\n # is fixed. With a little math you can analytically calculate the third\n # and fourth columns of D. 
In fact x(theta) is linear in theta[2] and\n # theta[3], but not in theta[0] and theta[1].\n\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A,_ = TM.eval_A_and_b(theta)\n D = gLCS.jacobian(theta);\n\n for k in range(p):\n D_col_3 = spla.spsolve(A, TM.B1[:,k])\n\n print \"Third column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,2], D_col_3))\n\n\n for k in range(p):\n D_col_4 = spla.spsolve(A, TM.B2[:,k])\n \n print \"Fourth column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,3], D_col_4))", "def _regressor_partial_fit(X, y, js_model, n_iter):\n\n X_is_sparse = sp.issparse(X)\n y_is_sparse = sp.issparse(y)\n\n js_state = libpytsetlini.regressor_partial_fit(\n X, X_is_sparse,\n y, y_is_sparse,\n js_model,\n n_iter)\n\n return js_state", "def Train(ss):\n ss.StopNow = False\n while True:\n ss.TrainTrial()\n if ss.StopNow:\n break\n ss.Stopped()", "def fit_regression(\n polynomials,\n abscissas,\n evals,\n model=None,\n retall=False,\n):\n logger = logging.getLogger(__name__)\n abscissas = numpy.asarray(abscissas)\n if len(abscissas.shape) == 1:\n abscissas = abscissas.reshape(1, *abscissas.shape)\n evals = numpy.array(evals)\n\n poly_evals = polynomials(*abscissas).T\n shape = evals.shape[1:]\n if shape:\n evals = evals.reshape(evals.shape[0], int(numpy.prod(evals.shape[1:])))\n\n if model is None:\n uhat = linalg.lstsq(poly_evals, evals)[0]\n\n else:\n try:\n from sklearn.base import BaseEstimator\n except ImportError:\n raise ValueError(\n \"arg model != None requires that scikit-learn is installed\")\n\n if not isinstance(model, BaseEstimator):\n raise ValueError(\"model not recognized; \"\n \"Optional[sklearn.base.BaseEstimator] expected\")\n if hasattr(model, \"fit_intercept\"):\n assert not model.fit_intercept, (\n \"model %s must have fit_intercept=False\" % model.__class__.__name__)\n uhat = model.fit(poly_evals, evals).coef_.T\n\n if shape:\n evals = evals.reshape(evals.shape[0], *shape)\n\n approx_model = numpoly.sum((polynomials*uhat.T), -1)\n approx_model = approx_model.reshape(shape)\n\n if retall == 1:\n return approx_model, uhat\n if retall == 2:\n return approx_model, uhat, poly_evals\n return approx_model", "def lr_model(pars, solver_options,\n recompile=0):\n support_code = \"\"\"\n #include \"gliotransmission_models.h\"\n \"\"\"\n source_files = [os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules/pycapi_utils.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules/solvers/solver_options.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules/solvers/stochastic_solvers.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir+'/code/gliotransmission_models.cpp')]\n code = \"\"\"\n // Version\n double version = 0.0;\n \n // Define astrocyte model\n lra astrocyte;\n \n // Declare output structure\n out_lra out;\n\n // Simulator\n out = astrocyte.simulate(pars,solver_options);\n\n //Output \n return_val = out.make_PyDict();\n \"\"\"\n libs = ['gsl', 'gslcblas', 'm']\n dirs = [os.path.join(os.path.expanduser('~'), base_dir+'/code/'),\n os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules'),\n os.path.join(os.path.expanduser('~'), base_dir+'/pycustommodules/solvers')]\n vars = ['pars', 'solver_options']\n olr = weave.inline(code,\n vars,\n support_code=support_code,\n sources=source_files,\n libraries=libs,\n library_dirs=dirs,\n include_dirs=dirs,\n runtime_library_dirs=dirs,\n type_converters=converters.blitz,\n compiler='gcc',\n extra_compile_args=['-std=c++11'],\n force=recompile)\n # 
Post-stimulus processing\n return olr", "def R(cls, *args, **kwargs):\n return cls(*args, model_type='regressor', **kwargs)", "def train_lrc(trX, trY, vaX, vaY, teX=None, teY=None, penalty='l1',\n C=2**np.arange(-4, 1).astype(np.float), seed=42):\n scores = []\n for i, c in tqdm(enumerate(C)):\n model = LogisticRegression(C=c, penalty=penalty,\n random_state=seed+i, tol=0.0001)\n model.fit(trX, trY)\n score = model.score(vaX, vaY)\n scores.append(score)\n c = C[np.argmax(scores)]\n model = LogisticRegression(C=c, penalty=penalty,\n random_state=seed+len(C), tol=0.0001)\n model.fit(trX, trY)\n return model", "def model_linear(train_x, train_y, test_x):\n train_x = sm.add_constant(train_x)\n model_fit = sm.OLS(train_y, train_x).fit()\n model_info = {'model': 'linear', 'R2': model_fit.rsquared, 'f_pvalue': model_fit.f_pvalue,\n 'const': model_fit.params.const, 'beta': model_fit.params.values[1]}\n predictions = model_fit.predict(sm.add_constant(test_x))\n return predictions, model_info", "def test_linear_fit_model_set(self):\n\n init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)\n x = np.arange(10)\n y_expected = init_model(x, model_set_axis=False)\n assert y_expected.shape == (2, 10)\n\n # Add a bit of random noise\n with NumpyRNGContext(_RANDOM_SEED):\n y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1)", "def bias_var(x, y, z, first_poly = 4, complexity = 10, N = 100, method = 'OLS', seed = 42, lam = 0, train = 0.7, folds = 5):\n\n bias = np.zeros(complexity + 1)\n variance = np.zeros(complexity + 1)\n z_real = FrankeFunction(x, y)\n\n complx = np.arange(first_poly, first_poly + complexity + 1, 1)\n\n for i in range(complexity + 1):\n print(i)\n model = regression(x, y, z, k = first_poly + i, split = True, train = train, seed = seed)\n\n _, _, _, z_real_test = model.train_test(X = model.X_full, z = np.ravel(z_real), train = train, seed = seed)\n\n counter = 0\n z_tildes = np.zeros((np.size(z_real_test), N))\n for j in range(N):\n\n z_new = FrankeFunction(x, y) + np.random.normal(0, 1, size = x.shape)\n _, _, z_train, _ = model.train_test(X = model.X_full, z = np.ravel(z_new), train = train)\n if method == 'OLS':\n beta = model.OLS(z = z_train)\n elif method == 'Ridge':\n beta = model.Ridge(lam = lam, z = z_train)\n elif method == 'Lasso':\n beta = model.Lasso(lam = lam, z = z_train)\n\n z_tilde = model.z_tilde(beta, X = model.X_test)\n z_tildes[:, j] = np.ravel(z_tilde)\n\n\n bias[i] = np.mean((np.ravel(z_real_test).reshape(-1, 1) - np.mean(z_tildes, axis = 1, keepdims = True))**2)\n variance[i] = np.mean(np.var(z_tildes, axis = 1, keepdims = True))\n\n plt.title(method + ' with N = ' + str(N) + ' times pr complexity')\n plt.plot(complx, bias, 'go--', label = 'Bias', color = 'blue')\n plt.plot(complx, variance, 'go--', label = 'Variance', color = 'red')\n #plt.ylim([np.min(errors_R2[2]*1.2), np.max(errors_R2[0]*1.2)])\n plt.legend()\n plt.xlabel('Polynomial maximum order', fontsize = 14)\n plt.ylabel('Bias/variance', fontsize = 14)\n plt.tight_layout()\n plt.savefig(results_dir + 'bias_variance' + method + '.png')\n\n plt.show()", "def xlarge_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n #model.add(Dense(18, input_dim=12, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dense(16, 
kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def train_all(self):\n for p in self.parameters():\n p.requires_grad = True\n return self", "def train_all_individual_models(dropout=0.998, hidden_layers=27, verbosity=2):\n for i, unit in enumerate(UNITS):\n print(\"Training the model for {} ({}/{})\".format(unit, i+1, len(UNITS)))\n train_model(load_enrolment_matrix(unit, from_pickle=True), dropout, hidden_layers, verbosity, save=unit)", "def create_model_eg1(my_learning_rate):\n # This is a first try to get a simple model that works\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten(input_shape=(8, 8, 15)))\n model.add(tf.keras.layers.Dense(units=32, activation='relu'))\n model.add(tf.keras.layers.Dense(units=32, activation='relu'))\n model.add(tf.keras.layers.Dense(units=1))\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=my_learning_rate),\n loss=\"mean_squared_error\",\n metrics=['MeanSquaredError'])\n\n return model", "def linear_regression_forecasting(x_train,y_train,x_valid,y_valid,x_test,y_test):\n y_train = y_train.reshape(TRAINING_BATCH_SIZE,N_PREDICTIONS*N_OUTPUT_FEATURES)\n y_valid = y_valid.reshape(VALIDATION_BATCH_SIZE,N_PREDICTIONS*N_OUTPUT_FEATURES)\n layer1 = keras.layers.Flatten(input_shape=[N_INPUT_STEPS,N_INPUT_FEATURES]) #input layer flattens each batch instance from [n_steps,n_input_features] to [n_steps*n_input_features]\n layer2 = keras.layers.Dense(N_PREDICTIONS*N_OUTPUT_FEATURES) #fully connected layer solves combination of linear equations\n model = keras.models.Sequential([layer1,layer2])\n model.compile(loss=\"mse\",optimizer=\"adam\")\n training_history = model.fit(x_train,y_train,epochs=N_EPOCHS,validation_data=(x_valid,y_valid),verbose=0)\n y_pred = model.predict(x_test, TESTING_BATCH_SIZE)\n y_pred = y_pred.reshape(TESTING_BATCH_SIZE,N_PREDICTIONS,N_OUTPUT_FEATURES)\n return training_history.history, y_pred, model" ]
[ "0.6492245", "0.64286274", "0.6315411", "0.62603295", "0.62471396", "0.62323004", "0.61873966", "0.61866516", "0.61369425", "0.6030381", "0.6029556", "0.60007995", "0.5999436", "0.5996053", "0.5956116", "0.5954355", "0.59472334", "0.5938865", "0.59243846", "0.5919388", "0.5903752", "0.5881064", "0.58477485", "0.5844881", "0.5826123", "0.581926", "0.57854646", "0.5781525", "0.57795435", "0.57545626", "0.5746252", "0.5733568", "0.5731546", "0.5721768", "0.5712356", "0.5699316", "0.5698514", "0.56973857", "0.568782", "0.56867296", "0.5680834", "0.5674403", "0.56645364", "0.56625116", "0.56547844", "0.5653979", "0.5651411", "0.5648197", "0.5647416", "0.56396323", "0.56316274", "0.56250405", "0.56239736", "0.56236297", "0.5619484", "0.5617693", "0.5617693", "0.56168586", "0.5616175", "0.5609845", "0.5607988", "0.5603184", "0.5602213", "0.5600413", "0.5594339", "0.55804247", "0.557982", "0.55782765", "0.55733615", "0.55709803", "0.5568225", "0.5566818", "0.5565578", "0.5553806", "0.5544219", "0.5539654", "0.5538025", "0.5536903", "0.5530174", "0.5529954", "0.5529826", "0.55295795", "0.5524984", "0.5524744", "0.55223155", "0.55220264", "0.5520558", "0.55154294", "0.5513432", "0.55090004", "0.5507714", "0.55053246", "0.5504569", "0.5502593", "0.5498895", "0.549527", "0.5494252", "0.5493989", "0.548978", "0.54858774", "0.54842126" ]
0.0
-1
Make a model for each trial using least squares separate (LSS)
def _lss_events_iterator(events_file): import pandas as pd import numpy as np events = pd.read_csv(events_file, sep='\t') trial_counter = dict([(t, 0) for t in np.unique(events['trial_type'])]) for trial_id in range(len(events)): trial_type = events.loc[trial_id, 'trial_type'] # make a copy of the dataframe events_trial = events.copy() # assign new name to all events from original condition trial_type_id = events_trial['trial_type'] == trial_type events_trial.loc[trial_type_id, 'trial_type'] = 'other' # assign the trial of interest to be its original value events_trial.loc[trial_id, 'trial_type'] = trial_type yield events_trial, trial_type, trial_counter[trial_type] trial_counter[trial_type] += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_SLM():\n samples = 10\n predictors = 3\n\n grid = list(create_parameter_grid(samples, predictors))\n Y = np.random.rand(samples, 10242, predictors)\n\n for i in range(len(grid)):\n # Skip exceptions that we know error.\n if grid[i][\"surf\"] is None:\n if grid[i][\"correction\"] is not None and \"rft\" in grid[i][\"correction\"]:\n continue\n if grid[i][\"Y_idx\"] > 1 and grid[i][\"two_tailed\"] is False:\n continue\n\n try:\n slm = SLM(\n model=grid[i][\"model\"],\n contrast=grid[i][\"contrast\"],\n surf=grid[i][\"surf\"],\n mask=grid[i][\"mask\"],\n correction=grid[i][\"correction\"],\n two_tailed=grid[i][\"two_tailed\"],\n )\n slm.fit(Y[:, :, 0 : grid[i][\"Y_idx\"]])\n except Exception as e:\n print(\"Error on run:\", i)\n print(\"SLM failed with the following parameters:\")\n print(\"Model: \", grid[i][\"model\"])\n print(\"Contrast: \", grid[i][\"contrast\"])\n print(\"Surface: \", grid[i][\"surf\"])\n print(\"Mask: \", grid[i][\"mask\"])\n print(\"Correction: \", grid[i][\"correction\"])\n print(\"Two_tailed: \", grid[i][\"two_tailed\"])\n print(\"Y_idx: \", grid[i][\"Y_idx\"])\n raise e", "def get_model( ):\n\n return Lasso(alpha = 1e-3, fit_intercept = True, precompute = True, max_iter = 1e4)", "def train_model(X_train, y_train):\n rgs = linear_model.Lasso(alpha=0.1)\n rgs.fit(X_train, y_train)\n return rgs", "def nnls_fit(self):\n\n def sprod(a,b): #simplecting inner product between two Pauli operators\n return int(not a.commutes(b))\n\n F1 = [] #First list of terms\n F2 = [] #List of term pairs\n fidelities = [] # list of fidelities from fits\n\n for datum in self._term_data.values():\n F1.append(datum.pauli)\n fidelities.append(datum.fidelity)\n #If the Pauli is conjugate to another term in the model, a degeneracy is present\n if self._issingle(datum):\n F2.append(datum.pauli)\n else:\n pair = datum.pair\n F2.append(pair)\n\n #create commutativity matrices\n M1 = [[sprod(a,b) for a in F1] for b in F1]\n M2 = [[sprod(a,b) for a in F1] for b in F2]\n\n #check to make sure that there is no degeneracy\n if np.linalg.matrix_rank(np.add(M1,M2)) != len(F1):\n raise Exception(\"Matrix is not full rank, something went wrong!\")\n \n #perform least-squares estimate of model coefficients and return as noisemodel \n coeffs,_ = nnls(np.add(M1,M2), -np.log(fidelities)) \n self.noisemodel = NoiseModel(self.layer._cliff_layer, F1, coeffs)", "def experiment_models(train, test, train_target, test_target):\n # Linear models\n linear_models = [(LinearRegression, {\"n_jobs\": -1}),\n (Lasso, {\"alpha\": 3}),\n (Ridge, {\"alpha\": 3}),\n (LinearSVR, {\"random_state\": 0, \"tol\": 1e-5})]\n\n # Add polynomial features\n poly = preprocessing.PolynomialFeatures(2)\n\n # scaler\n scaler = preprocessing.StandardScaler().fit(train)\n\n print(\"Use linear models with linear features\")\n for model_ in linear_models:\n scaled_train = scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")\n\n print(\"Use linear models with polynomial features\")\n train = poly.fit_transform(train)\n test = poly.transform(test)\n scaler = preprocessing.StandardScaler().fit(train)\n for model_ in linear_models:\n scaled_train = 
scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")", "def generate_models(R, u_t, inverse_transform, algo):\n model_list = []\n it_max = 10000 # maximum number of iterations after which the Lasso and SR3 are stopped to save computational time\n # in our experience, if the model converges at all, this is usually far sooner than 10000 iterations\n tol_iterativ = 10 * np.finfo(float).eps # convergence tolerance of SR3 and Lasso\n if algo == 'FoBa':\n log_epsilon_range = np.arange(-15., 15., 0.5)\n for log_epsilon in log_epsilon_range:\n w = FoBa(R, u_t, epsilon=10 ** log_epsilon, backwards_freq=1, maxit_f=20)\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'Lasso':\n log_lambda_range = np.arange(-15., 15., 0.5) # l1 factor\n for log_lambda in log_lambda_range:\n # initialize Lasso model\n clf = linear_model.Lasso(alpha=10**log_lambda, copy_X=True, fit_intercept=True, max_iter=it_max,\n normalize=False, positive=False, precompute=False, random_state=None,\n selection='cyclic', tol=tol_iterativ, warm_start=False)\n clf.fit(R, u_t) # fit model\n w = clf.coef_\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'STRidge':\n log_lambda_range = np.arange(-15, 15., 1.) # l2 factor (Ridge)\n log_tol_range = np.arange(-16, 10., 1.)\n for log_lambda in log_lambda_range:\n for log_tol in log_tol_range:\n w = STRidge(R, u_t, maxit=1000, lam=10**log_lambda, tol=10**log_tol, normalize=2)\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'SR3':\n # Uses python-matlab interface to directly use the original SR3 implementation.\n # Note that setting up the interface can be a bit tricky; if setting up the interface is too much effort,\n # just leave SR3 out of the 'algo_list' in the SITE file.\n t_sr3_start = time.time()\n eng = matlab.engine.start_matlab()\n eng.setup_matlab(nargout=0)\n log_lambda_range = np.arange(-15, 15., 1.) # l1 factor\n log_kappa_range = np.arange(-5, 6., 1.)\n for log_kappa in log_kappa_range:\n for log_lambda in log_lambda_range:\n R_matlab = matlab.double(R.tolist())\n u_t_matlab = matlab.double(u_t.tolist())\n # iters can be used to check if model converged or it_max was reached\n x, w, iters = eng.sr3(R_matlab, u_t_matlab, 'mode', '0', 'kap', (10**log_kappa).item(), 'lam',\n (10**log_lambda).item(), 'itm', it_max, 'tol', tol_iterativ.item(), 'ptf',\n 45000, nargout=3)\n w = np.asarray(w)\n initialize_model(w, model_list, algo, inverse_transform)\n eng.quit()\n print('Time for evaluation SR3: ', time.time() - t_sr3_start)\n\n else: raise ('The algorithm ' + str(algo) + ' is not implemented! 
(or a typo)')\n return model_list", "def best_fit(x, y, z, z_real, p = list(range(3, 15)), folds = 4, train = 0.7, seed = 42, n_lambda = 2001, n = 1, m = 1):\n lambdas = np.array([0] + np.logspace(-5.5, -1, n_lambda).tolist())\n polynomials = np.array(p)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n lambda_min_ridge = np.zeros(len(polynomials))\n lambda_min_lasso = np.zeros(len(polynomials))\n R2 = np.zeros((3, len(polynomials)))\n MSE = np.zeros((3, len(polynomials)))\n\n R2_data = np.zeros((3, len(polynomials)))\n MSE_data = np.zeros((3, len(polynomials)))\n\n\n for i in range(len(polynomials)):\n print(i + polynomials[0])\n ridge_sum = 0\n lasso_sum = 0\n model = regression(x, y, z, split = True, train = train, seed = seed, k = polynomials[i])\n z_test = np.ravel(np.copy(model.z_test))\n for j in range(n): #The mean of n times\n ridge_sum += model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True, n_lambda = n_lambda)[0]\n for j in range(m): #The mean of m times\n lasso_sum += model.lambda_best_fit(method = 'Lasso', fold = folds, n_lambda = n_lambda)[0]\n lambda_min_ridge[i] = ridge_sum/n\n lambda_min_lasso[i] = lasso_sum/m\n\n _,_, a, z_real_test = model.train_test(X = model.X_full, z = z_real, train = 0.7, seed = seed) #Both the training set and the test set for z_real in that order in list/tuple\n\n Beta_ols = model.OLS()\n Beta_ridge = model.Ridge(lam = lambda_min_ridge[i])\n Beta_lasso = model.Lasso(lam = lambda_min_lasso[i], max_iter = 1001)\n\n z_tilde_OLS = model.z_tilde(Beta_ols, X = model.X_test)\n z_tilde_Ridge = model.z_tilde(Beta_ridge, X = model.X_test)\n z_tilde_Lasso = model.z_tilde(Beta_lasso, X = model.X_test)\n\n R2[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_real_test)\n R2[1, i] = model.R_squared(z_tilde = z_tilde_Ridge, z = z_real_test)\n R2[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_real_test)\n\n MSE[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_real_test)\n MSE[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_real_test)\n MSE[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_real_test)\n\n R2_data[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_test)\n R2_data[1, i] = model.R_squared(z_tilde = z_tilde_Ridge, z = z_test)\n R2_data[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_test)\n\n MSE_data[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_test)\n MSE_data[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_test)\n MSE_data[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_test)\n\n _, _, lambdas = model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True)\n\n min_MSE = [[np.argmin(MSE[0]), np.argmin(MSE[1]), np.argmin(MSE[2])], [np.argmin(MSE_data[0]), np.argmin(MSE_data[1]), np.argmin(MSE_data[2])]]\n min_R2 = [[np.argmin(MSE[0]), np.argmin(MSE[1]), np.argmin(MSE[2])], [np.argmin(MSE_data[0]), np.argmin(MSE_data[1]), np.argmin(MSE_data[2])]]\n\n print('Minimum MSE with Frank, OLS: ', np.min(MSE[0]), ' Ridge: ', np.min(MSE[1]), ' Lasso: ', np.min(MSE[2]))\n print('With polynoms: ', np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Minimum MSE with Data, OLS: ', np.min(MSE_data[0]), ' Ridge: ', np.min(MSE_data[1]), ' Lasso: ', np.min(MSE_data[2]))\n print('With polynoms: ', np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0])\n 
print('----------------------------------------------------------------------------------------------')\n print('Maximum R2 with Frank, OLS: ', np.max(R2[0]), ' Ridge: ', np.max(R2[1]), ' Lasso: ', np.max(R2[2]))\n print('With polynoms: ', np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Maximum R2 with Frank, OLS: ', np.max(R2_data[0]), ' Ridge: ', np.max(R2_data[1]), ' Lasso: ', np.max(R2_data[2]))\n print('With polynoms: ', np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n\n error_mins = np.array([[np.min(MSE[0]), np.min(MSE[1]), np.min(MSE[2])],\n [np.min(MSE_data[0]), np.min(MSE_data[1]), np.min(MSE_data[2])],\n [np.max(R2[0]), np.max(R2[1]) , np.max(R2[2])],\n [np.max(R2_data[0]), np.max(R2_data[1]), np.max(R2_data[2])],\n [np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0]],\n [np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0]],\n [np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0]],\n [np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0]]]).T\n\n text = ['MSE Franke', 'MSE Data','R\\(^2\\) Franke', 'R\\(^2\\) Data']\n print(latex_print(error_mins, text = text))\n\n print('Ridge lambda, lowest indexes for Franke: ', np.argmin(MSE[2]))\n print('Ridge lambda, lowest indexes for Data: ', np.argmin(MSE_data[2]))\n print(lambda_min_ridge)\n print('Lasso lambda, lowest indexes for Franke: ', np.argmin(MSE[2]))\n print('Lasso lambda, lowest indexes for Data: ', np.argmin(R2_MSE[2]))\n print(lambda_min_lasso)\n #Real Franke\n\n plt.plot(polynomials, R2[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, R2[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, R2[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('R2 error between the model and FrankeFunction', fontsize = 14)\n plt.ylabel('R2')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_poly.png')\n\n plt.show()\n\n plt.plot(polynomials, MSE[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, MSE[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, MSE[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('MSE for test data between the model and FrankeFunction', fontsize = 14)\n plt.ylabel('MSE')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE.png')\n\n plt.show()\n\n #Noise Franke\n\n plt.plot(polynomials, R2_data[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, R2_data[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, R2_data[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('R2 error between the model and data', fontsize = 14)\n plt.ylabel('R2')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_poly_data.png')\n\n plt.show()\n\n plt.plot(polynomials, MSE_data[0], 'go--', label = 'OLS', color = 'red')\n 
plt.plot(polynomials, MSE_data[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, MSE_data[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('MSE for test data between the model and data', fontsize = 14)\n plt.ylabel('MSE')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE_data.png')\n\n plt.show()\n\n #Polynomial and lambda\n\n plt.plot(polynomials, lambda_min_ridge, 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, lambda_min_lasso, 'go--', label = 'Lasso', color = 'green')\n\n plt.title('The \\'best\\' lambda pr polynomial')\n plt.ylabel('Lambda')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n plt.savefig(results_dir + 'ridge_lasso_lambda_poly.png')\n plt.show()", "def least_squares_training(self, inputs, targets):\n self._rbf_forward(inputs)\n a = self.rbf_outputs.T @ self.rbf_outputs\n b = self.rbf_outputs.T @ targets\n self.slp_weights = np.linalg.solve(a, b)", "def nnRegression(data):", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def get_sgd_solution(TRAINING_PHI, TEST_PHI, VAL_PHI, W_Now, TrainingData,\n TrainingTarget, TestData, ValData):\n # Gradient Descent Solution for Linear Regression\n La = 2\n # learningRate = 0.01\n L_Erms_Val, L_Erms_TR, L_Erms_Test, L_Accuracy_Test, W_Mat = [], [], [], [], []\n\n for i in range(0, 400):\n\n # print (f'---------Iteration: {i} M{M} LR {learningRate} L :{C_Lambda}--------------')\n Delta_E_D = -np.dot(\n (TrainingTarget[i] - np.dot(np.transpose(W_Now), TRAINING_PHI[i])),\n TRAINING_PHI[i])\n La_Delta_E_W = np.dot(La, W_Now)\n Delta_E = np.add(Delta_E_D, La_Delta_E_W)\n Delta_W = -np.dot(learningRate, Delta_E)\n W_T_Next = W_Now + Delta_W\n W_Now = W_T_Next\n\n #-----------------TrainingData Accuracy---------------------#\n TR_TEST_OUT = GetValTest(TRAINING_PHI, W_T_Next)\n Erms_TR = GetErms(TR_TEST_OUT, TrainingTarget)\n L_Erms_TR.append(float(Erms_TR.split(',')[1]))\n\n #-----------------ValidationData Accuracy---------------------#\n VAL_TEST_OUT = GetValTest(VAL_PHI, W_T_Next)\n Erms_Val = GetErms(VAL_TEST_OUT, ValDataAct)\n L_Erms_Val.append(float(Erms_Val.split(',')[1]))\n\n #-----------------TestingData Accuracy---------------------#\n TEST_OUT = GetValTest(TEST_PHI, W_T_Next)\n Erms_Test = GetErms(TEST_OUT, TestDataAct)\n L_Erms_Test.append(float(Erms_Test.split(',')[1]))\n L_Accuracy_Test.append(float(Erms_Test.split(',')[0]))\n\n return ([L_Erms_TR, L_Erms_Val, L_Erms_Test, L_Accuracy_Test])", "def TrainTrial(ss):\n\n if ss.NeedsNewRun:\n ss.NewRun()\n\n ss.TrainEnv.Step()\n\n # Key to query counters FIRST because current state is in NEXT epoch\n # if epoch counter has changed\n epc = env.CounterCur(ss.TrainEnv, env.Epoch)\n chg = env.CounterChg(ss.TrainEnv, env.Epoch)\n\n if chg:\n ss.LogTrnEpc(ss.TrnEpcLog)\n if ss.ViewOn and ss.TrainUpdt.value > leabra.AlphaCycle:\n ss.UpdateView(True)\n if ss.TestInterval > 0 and epc%ss.TestInterval == 0: # note: epc is *next* so won't trigger first time\n ss.TestAll()\n if epc >= ss.MaxEpcs or (ss.NZeroStop > 0 and ss.NZero >= ss.NZeroStop):\n # done with training..\n ss.RunEnd()\n if ss.TrainEnv.Run.Incr(): # we are done!\n ss.StopNow = True\n return\n else:\n ss.NeedsNewRun = True\n return\n\n # note: type must be in place before apply inputs\n ss.Net.LayerByName(\"Output\").SetType(emer.Target)\n ss.ApplyInputs(ss.TrainEnv)\n ss.AlphaCyc(True) # train\n 
ss.TrialStats(True) # accumulate", "def fit_lr_model(df, X_train, y_train, X_test, y_test, mask_test):\n print(\"**** LINEAR REGRESSION ****\")\n lin_mod = sm.OLS(y_train, sm.add_constant(X_train))\n fit_lin = lin_mod.fit()\n print(fit_lin.summary())\n\n y_pred_test = fit_lin.predict(sm.add_constant(X_test))\n df_test = pd.concat([df[mask_test][['player','wkts','year1_wkts_pm']].reset_index(),\n pd.DataFrame(y_pred_test).reset_index()],axis=1,)\n df_test = df_test.drop('index',axis=1)\n df_test.columns = ['player','wkts','wkts_baseline','wkts_exp']\n\n df_by_player = df_test.groupby('player').sum()\n\n print('Explained Variance (LR model): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Explained Variance (Baseline): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print('Mean Squared Error (LR model): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Mean Squared Error (Baseline): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print(' ')", "def run(self, X, Y, model):\n\n p0 = X.iloc[0] # read in the input info\n params = lmfit.Parameters() # empty parameter class\n success = True # check for success\n\n if model == 'Medlyn':\n min, max = self.param_space('g1')\n params.add('g1', p0.g1, min=min, max=max)\n min, max = self.param_space('sref')\n params.add('sref', p0.sref, min=min, max=max)\n\n if model == 'Eller':\n min, max = self.param_space('kmax')\n params.add('kmaxS1', p0.kmaxS1, min=min, max=max)\n\n if (model == 'ProfitMax') or (model == 'ProfitMax2'):\n min, max = self.param_space('kmax')\n params.add('kmax', p0.kmax, min=min, max=max)\n\n # the following models all require the Sperry kmax as an input!\n if model == 'Tuzet':\n min, max = self.param_space('g1')\n params.add('g1T', p0.g1T, min=min, max=max)\n\n if 'Tleaf' in X.columns: # vary g1 and kmax\n min, max = self.param_space('kmax')\n params.add('kmaxT', p0.kmax, min=min, max=max)\n\n else: # vary g1 and Pref, sref fixed\n min, max = self.param_space('PrefT', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PrefT):\n params.add('PrefT', p0.PrefT, min=min, max=max)\n\n else:\n params.add('PrefT', -p0.P88, min=min, max=max)\n\n if model == 'WUE-LWP':\n min, max = self.param_space('Lambda')\n params.add('Lambda', p0.Lambda, min=min, max=max)\n\n if model == 'CGain':\n min, max = self.param_space('Kappa')\n params.add('Kappa', p0.Kappa, min=min, max=max)\n\n if model == 'CMax':\n min, max = self.param_space('Alpha')\n params.add('Alpha', p0.Alpha, min=min, max=max)\n min, max = self.param_space('Beta')\n params.add('Beta', p0.Beta, min=min, max=max)\n\n if model == 'SOX-OPT':\n min, max = self.param_space('kmax')\n params.add('kmaxS2', p0.kmaxS2, min=min, max=max)\n\n if model == 'LeastCost':\n min, max = self.param_space('kmax')\n params.add('kmaxLC', p0.kmaxLC, min=min, max=max)\n min, max = self.param_space('Eta')\n params.add('Eta', p0.Eta, min=min, max=max)\n\n if model == 'CAP':\n min, max = self.param_space('krl')\n params.add('krlC', p0.krlC, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritC):\n params.add('PcritC', p0.PcritC, min=min, max=max)\n\n else:\n params.add('PcritC', -p0.P88, min=min, max=max)\n\n if model == 'MES':\n min, max = self.param_space('krl')\n params.add('krlM', p0.krlM, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > 
p0.PcritM):\n params.add('PcritM', p0.PcritM, min=min, max=max)\n\n else:\n params.add('PcritM', -p0.P88, min=min, max=max)\n\n if not os.path.isdir(self.opath): # create output dir\n os.makedirs(self.opath)\n\n # run the minimizer\n if self.method == 'emcee':\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, steps=self.steps,\n nwalkers=self.nchains, burn=self.burn,\n thin=self.thin, is_weighted=False,\n progress=False, nan_policy='omit')\n\n else:\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, nan_policy='omit')\n\n for param in out.params.values():\n\n if np.isclose(param.value, param.init_value):\n params[param.name] = lmfit.Parameter(name=param.name,\n value=1.5 *\n param.init_value)\n out = lmfit.minimize(fres, params,\n args=(model, X, Y, self.inf_gb,),\n method=self.method,\n nan_policy='omit')\n\n if not os.path.isfile(os.path.join(self.opath, '%s.txt' % (model))):\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'w+')\n\n else: # append to existing file\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'a+')\n\n txt.write('\\n')\n txt.write(lmfit.fit_report(out))\n\n if not success:\n txt.write('\\n## Warning: had to fix first parameter value')\n\n txt.write('\\n')\n txt.close() # close text file\n\n return out.params.valuesdict()", "def fit(zs, ys, L, lam_1, lam_2, rho=10, maxiter=100, verbose=True, warm_start=None,\n eps_abs = 1e-5, eps_rel = 1e-5):\n K = int(zs.max() + 1)\n N, n = ys.shape\n Ys, cts = [], []\n for i in range(K):\n idx = zs == i\n cts.append(idx.sum()) #N_i, number of samples per z\n ys_i = ys[idx]\n Ys.append(ys_i.T @ ys_i)\n \n if verbose:\n print (\"Fitting covariance stratified model.\")\n print (\"%d stratification values, %d data points, %d dimensions\" % (K, N, n))\n print (\"%d\" % (K * n * n), \"optimization variables\")\n print (\"lam_1 = %3.3e, lam_2 = %3.3e, rho = %3.3e, maxiter=%d\" % (lam_1, lam_2, rho, maxiter))\n print (\"count per stratification value:\", cts)\n print (Ys[0].shape)\n\n shape = (K, n, n)\n if warm_start is None:\n warm_start = []\n for _ in range(5):\n warm_start.append(np.zeros(shape))\n inv_covs_loss, inv_covs_reg, inv_covs_lapl, U_1, U_2 = warm_start\n \n solve = factorized(L.tocsc() + rho * sparse.eye(K, format='csc'))\n \n for _ in range(maxiter):\n # inv_covs_loss\n for i in range(K):\n if cts[i] == 0:\n inv_covs_loss[i] = (inv_covs_lapl[i] - U_1[i])\n continue\n w, v = np.linalg.eigh((rho/cts[i]) * (inv_covs_lapl[i] - U_1[i]) - Ys[i]/cts[i])\n w_new = (w*cts[i]/rho + np.sqrt((w*cts[i]/rho)**2 + 4*cts[i]/rho))/2\n inv_covs_loss[i] = v @ np.diag(w_new) @ v.T \n \n # inv_covs_reg\n for i in range(K):\n inv_covs_reg[i][np.arange(n), np.arange(n)] = np.diag(inv_covs_lapl[i] - U_2[i] - lam_1/rho) #diagonal elements\n \n st2 = soft_threshold(inv_covs_lapl[i] - U_2[i], lam_2 / rho)\n od_idx = np.where(~np.eye(n,dtype=bool)) #gets off_diags\n inv_covs_reg[i][od_idx] = st2[od_idx] \n \n # inv_covs_lapl\n rhs = (inv_covs_loss + inv_covs_reg) / 2 + (U_1 + U_2) / 2\n rhs *= rho\n inv_covs_lapl_new = solve(rhs.reshape(K, n*n)).reshape(shape)\n S = rho * np.repeat(inv_covs_lapl_new - inv_covs_lapl, 2, axis=0)\n inv_covs_lapl = inv_covs_lapl_new.copy()\n\n # U_1\n R_1 = inv_covs_loss - inv_covs_lapl\n U_1 += R_1\n \n # U_2\n R_2 = inv_covs_reg - inv_covs_lapl\n U_2 += R_2\n \n R = np.concatenate([R_1, R_2], axis=0)\n \n # stopping criterion\n eps_pri = np.sqrt(2 * K * n * n) * eps_abs + eps_rel * 
max(np.linalg.norm(np.concatenate([inv_covs_loss, inv_covs_reg], axis=0)),\n np.linalg.norm(np.repeat(inv_covs_lapl, 2, axis=0)))\n eps_dual = np.sqrt(K * n * n) * eps_abs + eps_rel * np.linalg.norm(np.concatenate([U_1, U_2], axis=0))\n if verbose:\n print (np.linalg.norm(R), np.linalg.norm(S), eps_pri, eps_dual)\n \n return inv_covs_loss, inv_covs_reg, inv_covs_lapl", "def get_linear_model():\n\n ss = StandardScaler()\n lr = LogisticRegression(penalty='l2', max_iter=1000, class_weight=None) # ridge\n\n lr_model = Pipeline(steps=(['scale', ss], ['clf', lr])) # pipeline\n\n lr_model_params = {\n 'clf__C':loguniform(1e-3,1e3)\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV\n linear_model = RandomizedSearchCV(lr_model, lr_model_params, n_iter=100, cv=3)\n\n return clone(linear_model)", "def linear_regression(x_train, t_train, basis, bias,reg_lambda=0, degree=1, mu=0, s=1):\n \n # Construct the design matrix.\n # Pass the required parameters to this function\n \n phi = design_matrix(x_train,basis,degree,bias,mu,s) \n #print(x_train.shape) \n # Learning Coefficients\n if reg_lambda > 0:\n I=np.identity((phi.shape[1]),dtype=int)\n inv = np.linalg.inv((reg_lambda*I)+(phi.T@phi))\n w = inv@(phi.T@t_train) \n # regularized regression\n else:\n # no regularization \n w = np.linalg.pinv(phi)@t_train\n \n pred_train=phi@w\n train_err = np.sqrt((np.square(pred_train-t_train)).mean())\n return (w, train_err)", "def test_lcsmodel_class():\n\n # Set the problem size.\n n = 1000\n p = 3\n\n # Define the test model\n TM = test.Model2(n,p)\n\n # Note: diff_A/diff_b do not require A/b as an input in this case,\n # but in the more general case they might.\n\n # Check the basic model calculations.\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A,B = TM.eval_A_and_b(theta)\n\n dA_1, dB_1 = TM.diff_A_and_b(A, B, theta, 0)\n dA_2, dB_2 = TM.diff_A_and_b(A, B, theta, 1)\n dA_3, dB_3 = TM.diff_A_and_b(A, B, theta, 2)\n dA_4, dB_4 = TM.diff_A_and_b(A, B, theta, 3)\n Z = numpy.zeros_like(dA_1.todense())\n z = numpy.zeros_like(dB_1)\n \n print \"dA/dtheta_1 check:\", numpy.allclose(dA_1.todense(), TM.A1.todense())\n print \"dA/dtheta_2 check:\", numpy.allclose(dA_2.todense(), TM.A2.todense())\n print \"dA/dtheta_3 check:\", numpy.allclose(dA_3.todense(), Z)\n print \"dA/dtheta_4 check:\", numpy.allclose(dA_4.todense(), Z)\n\n print \"db/dtheta_1 check:\", numpy.allclose(dB_1, z)\n print \"db/dtheta_2 check:\", numpy.allclose(dB_2, z)\n print \"db/dtheta_3 check:\", numpy.allclose(dB_3, TM.B1)\n print \"db/dtheta_4 check:\", numpy.allclose(dB_4, TM.B2)\n\n\n #\n # Test the lcs model class\n #\n\n gLCS = LCSModel()\n gLCS.eval_A_and_b = TM.eval_A_and_b\n gLCS.diff_A_and_b = TM.diff_A_and_b\n \n gLCS.quiet=True\n\n x = gLCS.eval(theta)\n #print x.shape\n\n for k in range(p):\n print \"Primal solution for x_{}, matches spsolve calculation: {}\".\\\n format(k, numpy.allclose(x[:,k], spla.spsolve(A,B[:,k])))\n\n\n D = gLCS.jacobian(theta)\n\n # -- If theta[1]=0, and theta[2:3] are fixed, then there is an analytical\n # calculation for x(theta[0]), and in this case we can check the first\n # column of D.\n\n theta = numpy.array((5.1, 0, 1.2, 2.1))\n A, B = TM.eval_A_and_b(theta)\n D = gLCS.jacobian(theta)\n\n for k in range(p):\n D_col_1 = -(1./theta[0]**2) * B[:,k]\n print \"First column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,0], D_col_1))\n\n\n # -- We'll use a numerical approximation to check the second column of D\n\n h = 0.000001\n theta = numpy.array((5.1, 1.1, 1.2, 
2.1))\n dtheta = numpy.array((0., h, 0., 0.))\n A,B = TM.eval_A_and_b(theta)\n x = gLCS.eval(theta)\n D = gLCS.jacobian(theta)\n\n A_dt, B_dt = TM.eval_A_and_b(theta + dtheta)\n\n for k in range(p):\n x_dt = spla.spsolve(A_dt, B_dt[:,k])\n D_col_2_num_approx = (x_dt - x[:,k])/h\n max_abs_err = numpy.max(numpy.abs(D[k,:,1] - D_col_2_num_approx))\n\n print \"Second column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,1], D_col_2_num_approx))\n \n print \"Max abs error in second column of D_{}: {}\".\\\n format(k, max_abs_err)\n \n\n # -- If theta[0] and theta[1] are fixed, A(theta) is determined, and A^{-1}\n # is fixed. With a little math you can analytically calculate the third\n # and fourth columns of D. In fact x(theta) is linear in theta[2] and\n # theta[3], but not in theta[0] and theta[1].\n\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A,_ = TM.eval_A_and_b(theta)\n D = gLCS.jacobian(theta);\n\n for k in range(p):\n D_col_3 = spla.spsolve(A, TM.B1[:,k])\n\n print \"Third column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,2], D_col_3))\n\n\n for k in range(p):\n D_col_4 = spla.spsolve(A, TM.B2[:,k])\n \n print \"Fourth column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,3], D_col_4))", "def experiment_linear_conv_ls(_):\n # Min dft1-norm solution found (norm=1.9895)\n adv_norm_type = 'dftinf'\n dual_norm_type = 'dft1'\n attack_step_dir = 'dftinf_sd' # 'dftinf'\n\n module_name = 'train'\n # log_dir = 'runs_linear_conv_ls_%s' % adv_norm_type\n log_dir = 'runs_linear_conv_ls_normfix_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [1, 2, 4, 8, 16, 32] # separable >= 1\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 1), # 500\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n # ('lr', 0.1),\n ('niters', 1), # 10\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n\n # Model hyper-parameters\n conv_linear_params = nameit('model', [\n ('arch', 'conv_linear'),\n ('nlayers', 2),\n ('regularizer', 'none'),\n ])\n\n params = []\n\n # GD line search implicit bias\n gd_ls = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 100000),\n ('bound_step', True),\n ])\n params += [OrderedDict(shared_params+conv_linear_params+gd_ls)]\n\n return params, log_dir, module_name, exclude", "def create_sts_model(train_x, train_y):\n model = GaussianNB()\n model.fit(train_x, train_y)\n save_model(model, \"simple_time_series\")\n return model", "def train_model(args, tr_sparse):\n tf.logging.info('Train Start: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n \n # generate model\n input_tensor, row_factor, col_factor, model = wals.wals_model(tr_sparse,\n args.latent_factors,\n args.regularization,\n 
args.unobs_weight,\n args.weights,\n args.wt_type,\n args.feature_wt_exp,\n args.feature_wt_factor)\n \n # factorize matrix\n session = wals.simple_train(model, input_tensor, args.num_iters)\n \n tf.logging.info('Train Finish: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n \n # evaluate output factor matrices\n output_row = row_factor.eval(session=session)\n output_col = col_factor.eval(session=session)\n \n # close the training session \n session.close()\n \n return output_row, output_col", "def do_stats_model(x, y):\n Xx = sm.add_constant(x)\n sm_logit = sm.Logit(y, Xx)\n result = sm_logit.fit()\n print result.summary()\n result.pred_table()\n # linear model\n print \"linear regression model:\\n\"\n sm_linear = sm.OLS(y, Xx)\n result = sm_linear.fit()\n print result.summary()", "def Train(ss):\n ss.StopNow = False\n while True:\n ss.TrainTrial()\n if ss.StopNow:\n break\n ss.Stopped()", "def experiment_linear_tradeoff_linf(_):\n adv_norm_type = 'linf'\n dual_norm_type = 'l1'\n # Min l1-norm solution found (norm=0.6876)\n attack_eps = 1/0.6876\n attack_step_dir = 'sign_grad'\n module_name = 'train'\n log_dir = 'runs_linear_tradeoff_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [32]\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 500),\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10),\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n params = []\n\n # reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10]\n # Between 1e-3 and 1e-1 for d/n=10 the adv robustness drops\n reg_coeff += [3e-3, 5e-3, 3e-2, 5e-2, 3e-1, 5e-1]\n\n # Model hyper-parameters\n linear_noreg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', 'none'),\n ])\n linear_reg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', ['w_%s' % dual_norm_type]),\n ('reg_coeff', reg_coeff),\n ])\n\n # Explicit regularization with line search\n # njobs=3*6*20*4*2=2880\n explicit_reg = nameit('optim', [\n ('name', 'fista'),\n ('niters', 10000),\n ('bound_step', True),\n ('step_size', [1, 10, 100, 1000]),\n ])\n params += [OrderedDict(shared_params+linear_reg_model_params+explicit_reg)]\n\n # Adversarial training with line search\n for i in [1] + list(np.arange(0.1, 2, 0.2)): # [0.1, 0.3, 0.5, 0.7, 1, 1.3]:\n adv_train_params = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 10000),\n ('bound_step', True),\n ])\n adv_train_params += nameit('optim', nameit('adv_train', [\n ('enable', True),\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10), # niters, 1000\n ('pre_normalize', True),\n ('post_normalize', True),\n ('step_dir', 
attack_step_dir),\n ('eps_iter', float(attack_eps) * i),\n ('eps_tot', float(attack_eps) * i),\n ]))\n params += [OrderedDict(\n shared_params+linear_noreg_model_params+adv_train_params)]\n\n return params, log_dir, module_name, exclude", "def work(i, kfolds, alphas):\n\t# load data\n\tTheta = np.loadtxt('Regression_Data/Theta.txt')\n\tdadt = np.loadtxt('Regression_Data/a_dot.txt')\n\tnsamples, nfeatures = Theta.shape\n\tnn = dadt.shape[1]\n \n\t# average mean square error across the folds\n\tMSE_mean = np.zeros(len(alphas))\n\tMSE_std = np.zeros(len(alphas))\n\tMSE_full = np.zeros(len(alphas))\n\tMSE_full_rel = np.zeros(len(alphas))\n\n\t# number of nonzero coefficients\n\tnnz = np.zeros(len(alphas))\n\tcomm = MPI.COMM_WORLD\n\t# coefficients\n\tcoeffs = np.zeros((len(alphas), nfeatures))\n\n\tfor j, alpha in enumerate(alphas):\n\t\tmodel = linear_model.LassoCV(cv=kfolds,\n\t\t\t\t\t\talphas=[alpha],\n\t\t\t\t\t\tfit_intercept=False,\n\t\t\t\t\t\tmax_iter=3000,\n\t\t\t\t\t\ttol=1e-4).fit(Theta, dadt[:, i])\n \n\t\n\t\tprint('Worker %d :: doing alpha=%.2e :: completed %.2f %%\\n' % (comm.Get_rank(), model.alpha_, 100.0*float(j+1)/len(alphas)))\n\n\t\tsys.stdout.flush()\n\t\t# apparently this mse_path is already taking into\n\t\t# account the whole dataset, so we do not need to multiply by kfolds\n\t\tcoeffs[j] = model.coef_\n\t\tMSE_mean[j] = np.sqrt(nsamples*np.mean(model.mse_path_))\n\t\tMSE_std[j] = np.sqrt(np.std(nsamples*model.mse_path_))\n\n\t\t#MSE_full_rel[j] = np.mean(((np.dot(Theta, model.coef_) - dadt[:, i])**2)/np.linalg.norm(dadt[:, i])**2)\n\t\tMSE_full_rel[j] = np.mean(np.linalg.norm(np.dot(Theta, model.coef_) - dadt[:, i])/np.linalg.norm(dadt[:, i]))\t\t\n\t\t\n\t\t#MSE_full[j] = np.mean((np.dot(Theta, model.coef_) - dadt[:, i])**2)\t\t\n\t\tMSE_full[j] = np.mean(np.linalg.norm(np.dot(Theta, model.coef_) - dadt[:, i]))\n\t\t\n\t\tnnz[j] = np.count_nonzero(model.coef_)\n\n\t\t# save data\n\t\ttry:\n\t\t\t#shutil.rmtree('Regression_Results')\n\t\t\tos.mkdir('Regression_Results')\n\t\texcept OSError:\n\t\t\tpass\n\n\t\t\n\t\tnp.savetxt('Regression_Results/MSE_mean_%03d' % i, MSE_mean,delimiter=' ')\n\t\tnp.savetxt('Regression_Results/MSE_std_%03d' % i, MSE_std,delimiter=' ')\n\t\tnp.savetxt('Regression_Results/MSE_full_%03d' % i, MSE_full,delimiter= ' ')\n\t\tnp.savetxt('Regression_Results/MSE_full_rel_%03d' % i, MSE_full_rel,delimiter= ' ')\n\t\tnp.savetxt('Regression_Results/coeffs_%03d' % i, coeffs,delimiter = ' ')\n\t\tnp.savetxt('Regression_Results/nnz_%03d' % i, nnz,delimiter = ' ')\n\n\t\tprint('Done i = %03d\\n' % i)\n\treturn True", "def solver_mll(X, y, C, S, alpha=0.1, max_iter=1000, tol=1e-4, positive=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False, positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n old_theta = C[:, None] * S\n\n for i in range(max_iter):\n W = X * C[None, None, :]\n for k in range(n_tasks):\n lasso.fit(W[k], y[k])\n S[:, k] = lasso.coef_\n Z = S.T[:, None, :] * X\n Z = Z.reshape(n_tasks * n_samples, n_features)\n lasso_p.fit(Z, y.flatten())\n C = lasso_p.coef_\n theta = C[:, None] * S\n dll = abs(theta - old_theta).max()\n dll /= max(abs(theta).max(), abs(old_theta).max(), 1.)\n old_theta = theta.copy()\n\n if dll < tol:\n break\n\n if i == max_iter - 1:\n warnings.warn('Objective did not converge.' +\n ' You might want' +\n ' to increase the number of iterations.' 
+\n ' Fitting data with very small alpha' +\n ' may cause precision problems.',\n ConvergenceWarning)\n return C, S, i", "def fit_double_lasso(i):\n \n lasso_1 = linear_model.LassoCV(n_jobs=4, cv=5, max_iter=5000, tol=0.001, selection='random', n_alphas=20, fit_intercept=False, eps=0.0001)\n lasso_2 = linear_model.LassoCV(n_jobs=4, cv=5, max_iter=5000, tol=0.001, selection='random', n_alphas=20, fit_intercept=False, eps=0.0001)\n \n \n lasso_1.fit(X1_train, Y1_train[:, i])\n lasso_2.fit(X2_train, Y2_train[:, i])\n\n return lasso_1, lasso_2", "def optimize(self, trial):\r\n num_leaves = trial.suggest_int(\"num_leaves\", 6, 50)\r\n min_child_samples = trial.suggest_int(\"min_child_samples\", 100, 500)\r\n min_child_weight = trial.suggest_uniform(\"min_child_weight\", 1, 7)\r\n subsample = trial.suggest_uniform(\"subsample\", 0.6, 1)\r\n colsample_bytree = trial.suggest_uniform(\"colsample_bytree\", 0.6, 1)\r\n reg_alpha = trial.suggest_uniform(\"reg_alpha\", 0.1, 100)\r\n reg_lambda = trial.suggest_uniform(\"reg_lambda\", 0.1, 100)\r\n\r\n model = LGBMRegressor(\r\n num_leaves=num_leaves,\r\n min_child_samples=min_child_samples,\r\n min_child_weight=min_child_weight,\r\n subsample=subsample,\r\n colsample_bytree=colsample_bytree,\r\n reg_alpha=reg_alpha,\r\n reg_lambda=reg_lambda,\r\n )\r\n\r\n model = ModelTrainer(file_object=self.file_object).get_trained_model(\r\n model, self.X_train, self.y_train\r\n )\r\n r_squared, rmse = ModelScorer(file_object=self.file_object).get_model_scores(\r\n model, self.X_test, self.y_test\r\n )\r\n\r\n return r_squared", "def test_linear_fit_model_set(self):\n\n init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)\n x = np.arange(10)\n y_expected = init_model(x, model_set_axis=False)\n assert y_expected.shape == (2, 10)\n\n # Add a bit of random noise\n with NumpyRNGContext(_RANDOM_SEED):\n y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1)", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n 
std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def train_and_validate(trnK, trnY, valK, valY, Cs):\n models = []\n trn_error = []\n val_error = []\n sup_vect = []\n\n for C in Cs:\n #Training\n model = train(trnK, trnY, C)\n trn_error.append((100 - evaluate(trnK, trnY, model)) / 100)\n sup_vect.append(len(model.get_SV()))\n models.append(model)\n #Evaluate\n val_error.append((100 - evaluate(valK, valY, model)) / 100)\n return(models, trn_error, val_error, sup_vect)", "def model_linear(train_x, train_y, test_x):\n train_x = sm.add_constant(train_x)\n model_fit = sm.OLS(train_y, train_x).fit()\n model_info = {'model': 'linear', 'R2': model_fit.rsquared, 'f_pvalue': model_fit.f_pvalue,\n 'const': model_fit.params.const, 'beta': model_fit.params.values[1]}\n predictions = model_fit.predict(sm.add_constant(test_x))\n return predictions, model_info", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n w = initial_w.copy()\n ws = [w]\n loss = compute_loss_LS(y, tx, w)\n losses = [loss]\n for iter in range(max_iters):\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=1, num_batches=1):\n gradient = compute_gradient_LS(y_batch, tx_batch, w)\n w -= gamma * gradient\n loss = compute_loss_LS(y_batch, tx_batch, w)\n ws.append(w)\n losses.append(loss)\n # print(\"Stochastic Gradient Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}\".format(\n # bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))\n\n return losses[-1], ws[-1]", "def learnModel(self, X):\n if not scipy.sparse.isspmatrix_lil(X): \n raise ValueError(\"Input matrix must be lil_matrix\")\n \n oldZ = scipy.sparse.lil_matrix(X.shape)\n omega = X.nonzero()\n tol = 10**-6 \n \n ZList = []\n \n for lmbda in self.lmbdas: \n gamma = self.eps + 1\n \n while gamma > self.eps: \n newZ = oldZ.copy()\n print(\"Adding oldZ entries\")\n newZ[omega] = oldZ[omega] \n print(\"Done\") \n newZ = newZ.tocsc()\n \n U, s, V = self.svdSoft(newZ, lmbda, self.k)\n #Get an \"invalid value encountered in sqrt\" warning sometimes \n newZ = scipy.sparse.csc_matrix((U*s).dot(V.T))\n \n oldZ = oldZ.tocsr()\n normOldZ = SparseUtils.norm(oldZ)**2\n \n if abs(normOldZ) > tol: \n gamma = SparseUtils.norm(newZ - oldZ)**2/normOldZ\n \n if SparseUtils.norm(newZ - oldZ)**2 < tol: \n gamma = 0 \n \n oldZ = newZ \n \n ZList.append(newZ)\n \n if self.lmbdas.shape[0] != 1: \n return ZList \n else: \n return ZList[0]", "def fit_all_synthesis(t, l, bounds=(0, np.inf), alpha_0=1, beta_0=1, gamma_0=1):\n l = l.A if issparse(l) else l\n\n tau = np.hstack((0, t))\n x = np.hstack((0, l))\n\n f_lsq = lambda p: sol_u(tau, 0, p[0], p[1]) + 
sol_s(tau, 0, 0, p[0], p[1], p[2]) - x\n ret = least_squares(f_lsq, np.array([alpha_0, beta_0, gamma_0]), bounds=bounds)\n return ret.x[0], ret.x[1], ret.x[2]", "def train(self, i):\n\t\tlearningRate = self.learningRateFn(i)\n\t\tdiscountFactor = self.discountFactorFn(i)\n\t\ttrainingTuples = []\n\t\twhile self.currentSAVRSAVIndex < len(self.SAVRSAV):\n\t\t\tcurrentSAVRSAV = self.SAVRSAV[self.currentSAVRSAVIndex]\n\t\t\tvalue = currentSAVRSAV['predictedValueOfAction'] + learningRate * (currentSAVRSAV['reward'] + discountFactor*currentSAVRSAV['predictedValueOfNewAction'] - currentSAVRSAV['predictedValueOfAction'])\n\t\t\ttrainingTuples.append((currentSAVRSAV['state'],currentSAVRSAV['action'],value))\n\t\t\tself.currentSAVRSAVIndex += 1\n\t\treturn self.neuralNet.train(trainingTuples)", "def sslim_train(A, B, l1_reg=0.001, l2_reg=0.0001):\n alpha = l1_reg + l2_reg\n l1_ratio = l1_reg / alpha\n\n model = SGDRegressor(\n penalty='elasticnet',\n fit_intercept=False,\n alpha=alpha,\n l1_ratio=l1_ratio\n )\n\n # Following cSLIM proposal on creating an M' matrix = [ M, FT]\n # * alpha is used to control relative importance of the side information\n #Balpha = np.sqrt(alpha) * B\n B = B[:, :-3]\n Balpha = B\n\n Mline = vstack((A, Balpha), format='lil')\n m, n = A.shape\n\n # Fit each column of W separately\n W = lil_matrix((n, n))\n\n columns = Mline.shape[1]\n\n for j in range(columns):\n if j % 50 == 0:\n print '-> %2.2f%%' % ((j / float(columns)) * 100)\n\n mlinej = Mline[:, j].copy()\n\n # We need to remove the column j before training\n Mline[:, j] = 0\n\n model.fit(Mline, mlinej.toarray().ravel())\n\n # We need to reinstate the matrix\n Mline[:, j] = mlinej\n\n w = model.coef_\n\n # Removing negative values because it makes no sense in our approach\n w[w < 0] = 0\n\n for el in w.nonzero()[0]:\n W[(el, j)] = w[el]\n\n return W", "def fit_LuEd(self, wl, Ls, Lu, Ed, params, weights, verbose=True):\n\n\t\t\tdef min_funct(params):\n\t\t\t\tp = params.valuesdict() \n\t\t\t\n\t\t\t\tRrs_modelled, Rrs_refl, Lu_Ed_modelled = self.model(beta = p['beta'], alpha = p['alpha'], am = p['am'], rh = p['rh'], pressure = p['pressure'], C_chl = p['C_chl'], C_sm = p['C_sm'], C_mie = p['C_mie'], n_mie = p['n_mie'], C_y = p['C_y'], S_y = p['S_y'], T_w = p['T_w'], theta_sun = p['theta_sun'], theta_view = p['theta_view'], n_w = p['n_w'], rho_s = p['rho_s'], rho_dd = p['rho_dd'], rho_ds = p['rho_ds'], delta = p['delta'], wl = wl, a_w = self.spectra['a_w'].values, daw_dT = self.spectra['daw_dT'].values, astar_ph = self.spectra['astar_ph'].values, astar_y = self.spectra['astar_y'].values, Ls_Ed = Ls/Ed)\n\n\t\t\t\tRrs_obs = Lu/Ed - Rrs_refl\n\n\t\t\t\t# Least squares\n\t\t\t\tresid = np.sum((Lu_Ed_modelled - Lu/Ed)**2 * weights)\n\n\t\t\t\treturn resid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs\n\n\t\t\tstart_time = time.time()\n\n\t\t\treg = lm.minimize(lambda x: min_funct(x)[0], params=params, method='lbfgsb', options={'disp': verbose, 'gtol': 1e-16, 'eps': 1e-07, 'maxiter': 15000, 'ftol': 1e-16, 'maxls': 20, 'maxcor': 20}) \n\n\t\t\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\t\t\tresid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs = min_funct(reg.params)\n\t\t\treg.params.add('resid', resid, False, 0.0, 100, None)\n\n\t\t\treturn reg, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs", "def fit_model(X_train, X_test, y_train, y_test, model):\n \n if model == 'LinearRegression':\n \n regressor=LinearRegression()\n regressor.fit(X_train,y_train)\n y_pred =regressor.predict(X_test)\n r2 = 
r2_score(y_test, y_pred)\n \n elif model == 'Lasso':\n \n lasso = Lasso()\n lasso.fit(X_train, y_train)\n lasso_pred = lasso.predict(X_test)\n r2 = r2_score(y_test, lasso_pred)\n\n elif model == 'Ridge':\n \n ridge = Ridge()\n ridge.fit(X_train, y_train)\n ridge_pred = ridge.predict(X_test)\n r2 = r2_score(y_test, ridge_pred)\n \n \n else:\n model = make_pipeline(PolynomialFeatures(2), LinearRegression())\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n r2= r2_score(y_test,y_pred)\n\n\n return r2", "def LinReg_fit(X, y, X_test=None, y_test=None, lr=1e-7, batch=1, lamb=0,\n epoch=10000, print_every=100, lamb1=0, momentum=0):\n # initialize\n W = np.random.randn(X.shape[1]) / X.shape[1] / X.shape[0]\n\n train_loss = []\n train_RMSE = []\n test_loss = []\n test_RMSE = []\n\n # batch size indicator\n b = 0\n # cache for adagrad\n G = np.zeros(W.shape)\n\n for i in range(epoch):\n inds = []\n last_step = 0\n\n for j in np.random.permutation(X.shape[0]):\n inds.append(j)\n b += 1\n\n # do the adagrad to update the parameter\n if b >= batch:\n diff = X[inds].dot(W) - y[inds]\n\n # calculate gradients\n w = np.array(W)\n w[w > 0] = 1\n w[w < 0] = -1\n grad_X = X[inds].T.dot(diff)\n grad_regulariz = lamb * W * batch / X.shape[0]\n grad_first_order_reg = lamb1 * w * batch / X.shape[0]\n grad = grad_X + grad_regulariz + grad_first_order_reg\n\n # calculate update step\n G += grad**2\n delta_W = (grad + momentum * last_step) / np.sqrt(G)\n W -= lr * delta_W\n\n # reset variables\n last_step = delta_W\n b = 0\n inds = []\n\n objective = (((X.dot(W) - y)**2).sum() + lamb * (W**2).sum()) / 2.0\n RMSE = cal_RMSE(X, W, y)\n\n if X_test is not None and y_test is not None:\n # losses\n loss_X = ((X_test.dot(W) - y_test)**2).sum() / 2.0\n loss_reg = lamb * (W**2).sum() / 2.0\n loss_first_reg = lamb1 * (abs(W).sum())\n\n obj_t = loss_X + loss_reg + loss_first_reg\n RMSE_t = cal_RMSE(X_test, W, y_test)\n\n test_loss.append(obj_t)\n test_RMSE.append(RMSE_t)\n\n # print out the progress\n if i % print_every == 0:\n if X_test is not None and y_test is not None:\n print('\\tepoch: %d; obj: %.4f; RMSE: %.4f; RMSE_test: %.4f' %\n (i, objective, RMSE, RMSE_t))\n else:\n print('\\tepoch: %d; obj: %.4f; RMSE: %.4f' %\n (i, objective, RMSE))\n\n train_loss.append(objective)\n train_RMSE.append(RMSE)\n\n print('final obj: %.4f' % train_loss[-1])\n\n return W, train_loss, train_RMSE, test_loss, test_RMSE", "def train_loop(train_per_list, cut_off_list, C_list,\n factors, non_factors, data_path, executable_path, \n trial_factors_list=None): \n if trial_factors_list is None:\n trial_factors_list=[factors]\n sql_table = 'aggregated_ctr' #Data table\n # remove cross terms\n sql_features = list(set(sum([fs.split('*') for fs in factors], [])))\n# factors+=['campaign_id','ad_account_id','pub_account_id', \n# 'campaign_id*site', 'ad*pub_account_id']\n con_dict_dse={'host':'db.lqm.io','db':'dse',\n 'user':'dse','passwd':'dSe@lQm'}\n con_dict_mad={'host':'db.lqm.io','db':'madvertise_production',\n 'user':'readonly','passwd':'z0q909TVZj'}\n \n rtb_flag=[0,1]\n model_type=0\n has_intercept = True # bias term in LR\n tol = 0.00000001\n # NB these filenames are HARDCODED in write_sparse routines\n weights_file = 'train_ais.txt'\n train_file = 'train_svm.txt'\n test_file = 'test_svm.txt'\n probability_file = 'preds_SummModel_py.txt'\n results = []\n for train_per in train_per_list:\n test_per = ( add_hour(train_per[1], 1), add_hour(train_per[1], 3))\n # DATA RANGE IS INCLUSIVE => 00:00-02:00 = 3 HOURS\n 
train_df=mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, train_per, sql_features, rtb_flag)\n train_df=mysql_lqm.add_features( train_df)\n test_df= mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, test_per, sql_features, rtb_flag)\n test_df = mysql_lqm.add_features(test_df)\n \n sc, click_no_click_df, weights, targets \\\n = libLinear_functions.create_sparse_cat(train_df, factors, non_factors)\n\n \n for cut_off in cut_off_list:\n sparse_train_all = libLinear_functions.create_sparse(sc, cut_off, click_no_click_df)\n sparse_test_all = sc.transform(test_df)\n for trial_factors in trial_factors_list:\n trial_factors=trial_factors[:] # copy\n trial_factors.sort(key=lambda x: sc.factors.index(x))\n # libsvm expects the indices in ascending order\n print (trial_factors) \n sparse_train=sc.select_factors(sparse_train_all, trial_factors)\n sparse_test=sc.select_factors(sparse_test_all, trial_factors)\n libLinear_functions.write_sparse(sc, sparse_train, weights, targets, data_path, len(trial_factors))\n libLinear_functions.write_sparse_test(sc, sparse_test, data_path, n_columns_used= len(trial_factors))\n\n\n for C in C_list:\n model_file = \\\n '{start}_{stop}_cut_{cut_off}_C_{C:0.3}.model'.format(\n start=date_name(train_per[0]),\n stop=date_name(train_per[1]),\n cut_off=cut_off, C=C)\n fit(executable_path, data_path, train_file,\n model_file, weights_file, model_type, reg_param=C, tol=tol,\n has_intercept=has_intercept)\n \n \n pCTR = libLinear_functions.predict(executable_path, data_path, test_file,\n model_file, probability_file)\n if type(pCTR) is pd.Series:\n amounts = pd.DataFrame({\n 'no_clicks':test_df['instances' ]-test_df['clicks'],\n 'clicks':test_df['clicks']})\n mean_log_loss, weighted_log_loss = log_loss_weighted(pCTR, amounts)\n results.append([train_per[:],trial_factors[:],\n cut_off,C,amounts.clicks.sum(),amounts.no_clicks.sum(), mean_log_loss])\n results_df=pd.DataFrame(results,columns=['date','features','cutoff','C','clicks','no_clicks','lloss'])\n results_df.to_csv(data_path+'resultsX.txt',index=False, sep='|')\n # what to do if ERROR?\n return results_df, weighted_log_loss", "def stepwise_regression(train_per_list, cut_off_list, C_list,\n factors,non_factors, data_path, executable_path):\n sql_table = 'aggregated_ctr' #Data table\n sql_features = list(set(sum([fs.split('*') for fs in factors], [])))\n # remove cross terms\n\n factors+=['campaign_id','ad_account_id','pub_account_id', \n 'campaign_id*site', 'ad*pub_account_id']\n con_dict_mad={'host':'db.lqm.io','db':'madvertise_production',\n 'user':'readonly','passwd':'z0q909TVZj'}\n con_dict_dse={'host':'db.lqm.io','db':'dse','user':'dse','passwd':'dSe@lQm'}\n rtb_flag=[0,1]\n \n test_per_list= map(lambda x: ( add_hour(x[1], 1), add_hour(x[1], 3)), train_per_list)\n \n # test period is next 3 hours after end of training period\n # DATA RANGE IS INCLUSIVE => 00:00-02:00 = 3 HOURS\n MySQL_save_data_loop(con_dict_dse, sql_table,\n train_per_list, sql_features, rtb_flag, data_path)\n MySQL_save_data_loop(con_dict_dse, sql_table,\n test_per_list, sql_features, rtb_flag, data_path)\n \n model_type=0\n has_intercept = True # bias term in LR\n tol = 0.00000001\n\n # NB these filenames are HARDCODED in write_sparse routines\n weights_file = 'train_ais.txt'\n train_file = 'train_svm.txt'\n test_file = 'test_svm.txt'\n probability_file = 'preds_SummModel_py.txt'\n\n \n res_df_list=[]\n trial_factors=[]\n remaining_factors=factors[:]\n while len(remaining_factors):\n results = [] \n # we assume we cannot load all the data in 
memory\n # so we have to reload for every step of stepwise selection\n for train_per, test_per in zip(train_per_list, test_per_list):\n \n train_df=load_data(data_path,train_per)\n test_df=load_data(data_path,test_per)\n \n sc, click_no_click_df, weights, targets \\\n = libLinear_functions.create_sparse_cat(train_df, factors, non_factors)\n \n for cut_off in cut_off_list:\n sparse_train_all = libLinear_functions.create_sparse(sc, cut_off, click_no_click_df)\n sparse_test_all = sc.transform(test_df)\n for fac in remaining_factors:\n trial_factors.append(fac)\n trial_factors.sort(key=lambda x: sc.factors.index(x))\n # libsvm expects the indices in ascending order\n print (trial_factors) \n sparse_train=sc.select_factors(sparse_train_all, trial_factors)\n sparse_test=sc.select_factors(sparse_test_all, trial_factors)\n libLinear_functions.write_sparse(sc, sparse_train, weights, targets, data_path, len(trial_factors))\n libLinear_functions.write_sparse_test(sc, sparse_test, data_path, n_columns_used= len(trial_factors))\n\n for C in C_list:\n model_file = \\\n '{start}_{stop}_cut_{cut_off}_C_{C:0.3}.model'.format(\n start=date_name(train_per[0]),\n stop=date_name(train_per[1]),\n cut_off=cut_off, C=C)\n fit(executable_path, data_path, train_file,\n model_file, weights_file, model_type, reg_param=C, tol=tol,\n has_intercept=has_intercept)\n \n pCTR = libLinear_functions.predict(\n executable_path, data_path, test_file,\n model_file, probability_file)\n if type(pCTR) is pd.Series:\n amounts = pd.DataFrame({\n 'no_clicks':test_df['instances' ]-test_df['clicks'],\n 'clicks':test_df['clicks']})\n mean_log_loss, weighted_log_loss =\\\n libLinear_functions.log_loss_weighted(pCTR, amounts)\n results.append([train_per[:], tuple(trial_factors),fac, cut_off, C, mean_log_loss])\n # what to do if ERROR?\n trial_factors.remove(fac)\n res_df=pd.DataFrame(results,columns=['train_per','factors','add_factor','cut_off','C','mean_log_loss'])\n res_avg=res_df.groupby(['factors','add_factor','cut_off','C']).agg([np.mean,np.std])\n best_params=res_avg['mean_log_loss','mean'].argmin()\n best_fac=best_params[1]\n remaining_factors.remove(best_fac)\n trial_factors.append(best_fac)\n res_df_list.append(res_df)\n results_df=pd.concat(res_df_list)\n return results_df", "def train(self, epochs):\n mse_history = []\n for iteration in range(epochs):\n error = []\n for user_id in range(self.latent_user_preferences.shape[0]):\n for item_id in range(self.latent_item_features.shape[0]):\n rating = self.ratings[user_id, item_id]\n if not np.isnan(rating):\n predicted_rating = self.predict_rating(user_id, item_id)\n err = predicted_rating - rating\n error.append(err)\n self.sgd_optimizer(user_id, item_id, err)\n mse = (np.array(error) ** 2).mean() \n if (iteration % 2) == 0:\n print('Iteration %d/%d:\\tMSE=%.6f' % (iteration, epochs, mse))\n mse_history.append(mse)\n return mse_history", "def computeRegression(Xin, Yin, Xtest, noise, ls, sv):\n\n # compute kernels\n K = kernel_sqExp(Xin, Xin, ls=ls, sv=sv)\n Kstar = kernel_sqExp(Xin, Xtest, ls=ls, sv=sv)\n Kstarstar = kernel_sqExp(Xtest, Xtest, ls=ls, sv=sv)\n\n # compute mean based on training input points\n n1 = np.shape(K)[0]\n offSet = 1e-3\n L = np.linalg.cholesky(K + noise*np.eye(n1) + offSet*np.eye(n1))\n alpha = np.linalg.solve(L.T, np.linalg.solve(L,Yin))\n f_mean = np.dot(Kstar.T,alpha) # mean of points\n \n # compute resulting covariance of predictive distribution\n v = np.linalg.solve(L, Kstar)\n cov = Kstarstar - np.dot(v.T,v)\n \n # compute log of marginal 
likelihood\n #lML = -0.5*np.dot(Yin.T,alpha) - np.sum(np.log(L)) - (n1/2*np.log(2*math.pi))\n lML_dataFit = -0.5*np.dot(Yin.T,alpha)\n lML_complexity = -np.sum(np.log(L))\n lML_normalize = -(n1/2*np.log(2*math.pi))\n lML_details = [lML_dataFit, lML_complexity, lML_normalize]\n lML = lML_dataFit[0] + lML_complexity + lML_normalize \n \n return f_mean, cov, lML , lML_details", "def Train(R, Ut, lam, d_tol, maxit=10, STR_iters=10, l0_penalty=1, normalize=2, split=0.8,\n print_best_tol=False, sparse='STR'):\n\n # Split data into 80% training and 20% test, then search for the best tolderance.\n # np.random.seed(0) # for consistancy\n n, _ = R.shape\n #train = np.random.choice(n, int(n * split), replace=False)\n #test = [i for i in np.arange(n) if i not in train]\n TrainR = R#[train, :]\n TestR = R#[test, :]\n TrainY = Ut#[train, :]\n TestY = Ut#[test, :]\n D = TrainR.shape[1]\n\n # Set up the initial tolerance and l0 penalty\n d_tol = float(d_tol)\n tol = d_tol\n if l0_penalty == None: l0_penalty = 0.001 * np.linalg.cond(R)\n\n # Get the standard least squares estimator\n w = np.zeros((D, 1))\n\n # check\n # print(np.nan in TrainR)\n # print(np.inf in TrainR)\n\n def AIC(w, err):\n k = 0\n for item in w:\n if item != 0:\n k += 1\n\n return 2*k+2*np.log(err)\n\n w_best = np.linalg.lstsq(TrainR, TrainY)[0]\n data_err_best = np.linalg.norm(TestY - TestR.dot(w_best), 2)\n err_best = np.linalg.norm(TestY - TestR.dot(w_best), 2) + l0_penalty * np.count_nonzero(w_best)\n aic_best = AIC(w_best[:, 0], data_err_best)\n tol_best = 0\n\n if sparse == 'STR':\n # Now increase tolerance until test performance decreases\n for iter in range(maxit):\n\n # Get a set of coefficients and error\n w = STRidge(R, Ut, lam, STR_iters, tol, normalize=normalize)\n err = np.linalg.norm(TestY - TestR.dot(w), 2) + l0_penalty * np.count_nonzero(w)\n data_err = np.linalg.norm(TestY - TestR.dot(w), 2)\n\n # Has the accuracy improved?\n aic = AIC(w[:, 0], data_err)\n if aic <= aic_best:\n aic_best = aic\n err_best = err\n w_best = w\n data_err_best = data_err\n tol_best = tol\n tol = tol + d_tol\n else:\n tol = max([0, tol - 2 * d_tol])\n d_tol = 2 * d_tol / (maxit - iter)\n tol = tol + d_tol\n\n if print_best_tol: print(\"Optimal tolerance:\", tol_best)\n\n elif sparse == 'Lasso':\n w = Lasso(R, Ut, lam, w=np.array([0]), maxit=maxit*10, normalize=normalize)\n err = np.linalg.norm(Ut - R.dot(w), 2) + l0_penalty * np.count_nonzero(w)\n data_err = np.linalg.norm(Ut - R.dot(w), 2)\n\n if err <= err_best:\n err_best = err\n w_best = w\n data_err_best = data_err\n\n return w_best, err_best, data_err_best, aic_best", "def run_randomisedLasso(X,Y):\n\n prob_all = list()\n alphas = list()\n for i_y, y in enumerate(Y):\n print(i_y)\n randLasso = ms.MyRandomizedLasso(alpha='cv',n_jobs=1)\n randLasso.fit(X,y)\n\n prob = randLasso.scores_\n prob_all.append(prob)\n \n return prob_all", "def LinearRegr(synth_sample, real_sample, label, n_cores=1):\n train_col = list(set(synth_sample.columns) - set([label]))\n \n X_test = real_sample[train_col]\n y_test = real_sample[label]\n \n X_train = synth_sample[train_col]\n y_train = synth_sample[label]\n \n model = LinearRegression(n_jobs=n_cores)\n y_pred = model.fit(X_train, y_train).predict(X_test)\n \n return np.sqrt(mean_squared_error(y_test, y_pred))", "def evaluate_regression(x, t, w, basis, degree):\n \t# TO DO:: Compute t_est and err \n #w_tranpose=w.T\n\n\n # My logic goes as follows:\n # Definition of test error is when you run the trained\n # model against a dataset that it hasn't been 
exposed to\n # this dataset is known as the testset \n\n # As such the basic algorithm goes as follows:\n # We do not need to recompute the weights but we need to recompute\n # phi for our test data\n\n # As such, we are interested in how well our trained weights\n # estimate against the test data so we matrix multiply our\n # weights against the phi from our test data\n # thus t_est = w_train.T*phi(x) since we want to know how well our\n # trained model estimates against the training data\n # but in implementation we do phi(x)*w_train\n # to match array dimensions \n\n\n #Compute design matrix from test data \n phi=design_matrix(x,basis,degree)\n phi_cross=np.linalg.pinv(phi)\n\n # Compute testing weights // just in case we require this variable\n #if(t is not None):\n #w_test=phi_cross.dot(t)\n #w_test=phi_cross.dot(t)\n\n # We want to be able to index into our target vector\n\n #t_est=phi.dot(w_test)\n #if (t is not None):\n # testing_estimate=phi.dot(w_test)\n #testing_estimate=phi.dot(w_test)\n\n # Estimate of our targets according to test data against learned \n # coefficients\n t_est=phi.dot(w)\n #print(\"t_est\",t_est)\n #t_est = None\n\n # We calculate the RMS error as follows\n # Take equation 3.12 of PRML and modify as follows\n # My logic:\n # The equation given in PRML gives the SSE (sum of squares error)\n # By definition the MSE (mean squared error) takes the SSE and divides \n # it by population size, we also preserve the 1/2 constant \n # throughout our calcuations \n # Afterwards we take our MSE and square root it.\n\n # Compute difference between target and estimate\n\n if(t is not None):\n \n diff=t-t_est\n # Square all observations\n diff_squared=np.power(diff,2)\n # Sum up all the observations in our vector\n sig_squared=diff_squared.sum()\n half_sig_squared=0.5*(sig_squared)\n # Calculate population size\n population_size=t.shape[0]\n rmse=np.sqrt(half_sig_squared/population_size)\n err=rmse\n else:\n err=None\n\n #diff=t-t_est\n\n\n # Square all observations \n #diff_squared=np.power(diff,2)\n\n # Sum up all the observations in our vector\n #sig_squared=diff_squared.sum()\n\n #half_sig_squared=0.5*(sig_squared)\n\n # Calculate population size\n #population_size=t.shape[0]\n\n #rmse=np.sqrt(half_sig_squared/population_size)\n #err = rmse\n #print(\"err inside function\",err)\n #err=rmse\n return (t_est, err)", "def fit_least_squares(input_data, output_data):\n # This function's code follows the formula for finding the weights\n # that create the least mean-squared error, which is:\n # w = (((y_t)x)(inv((x_t)x))_t)\n\n xtx = numpy.matmul(numpy.transpose(input_data),input_data)\n xtx_inv = numpy.linalg.inv(xtx)\n ytx = numpy.matmul(numpy.transpose(output_data),input_data)\n\n return LinearModel(numpy.transpose(numpy.matmul(ytx,xtx_inv)))", "def train_lrc(trX, trY, vaX, vaY, teX=None, teY=None, penalty='l1',\n C=2**np.arange(-4, 1).astype(np.float), seed=42):\n scores = []\n for i, c in tqdm(enumerate(C)):\n model = LogisticRegression(C=c, penalty=penalty,\n random_state=seed+i, tol=0.0001)\n model.fit(trX, trY)\n score = model.score(vaX, vaY)\n scores.append(score)\n c = C[np.argmax(scores)]\n model = LogisticRegression(C=c, penalty=penalty,\n random_state=seed+len(C), tol=0.0001)\n model.fit(trX, trY)\n return model", "def train_model(lrmodel, X, Y, devX, devY, devscores):\n done = False\n best = -1.0\n r = np.arange(1,6)\n \n while not done:\n # Every 100 epochs, check Pearson on development set\n lrmodel.fit(X, Y, verbose=2, shuffle=False, validation_data=(devX, 
devY))\n yhat = np.dot(lrmodel.predict_proba(devX, verbose=2), r)\n score = pearsonr(yhat, devscores)[0]\n if score > best:\n print score\n best = score\n bestlrmodel = prepare_model(ninputs=X.shape[1])\n bestlrmodel.set_weights(lrmodel.get_weights())\n else:\n done = True\n\n yhat = np.dot(bestlrmodel.predict_proba(devX, verbose=2), r)\n score = pearsonr(yhat, devscores)[0]\n print 'Dev Pearson: ' + str(score)\n return bestlrmodel", "def model(self,sample):\n\n lca = self.lca\n \n self.amount_tech = lca.tech_params['amount']\n self.amount_bio = lca.bio_params['amount']\n\n self.i_sample = 0\n self.replace_non_parameterized_exchanges(sample)\n self.replace_parameterized_exchanges(sample)\n\n lca.rebuild_technosphere_matrix(self.amount_tech)\n lca.rebuild_biosphere_matrix(self.amount_bio)\n\n score = (sum(lca.characterization_matrix)*lca.biosphere_matrix) * \\\n spsolve(lca.technosphere_matrix,lca.demand_array)\n\n np.append(self.scores, score)\n\n return score", "def linear_regression(X, Y, Xs_test, Ys_test):\n\n X_n = (X - np.mean(X, axis = 0)) / np.std(X, axis = 0)\n XL = np.concatenate((X_n, np.ones((len(X),1))), axis = 1)\n w = np.linalg.solve(XL.T.dot(XL),XL.T.dot(Y))\n mses = []\n for i, X_test in enumerate(Xs_test):\n Y_test = Ys_test[i]\n XL_test = np.concatenate(((X_test - np.mean(X, axis = 0)) / np.std(X, axis = 0), \n np.ones((len(X_test),1))), axis = 1)\n Y_pred = XL_test.dot(w)\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test) ** 2, axis = 1))) \n mses.append(mse) \n return mses", "def __train_model(self):\n for i in range(self.file_index):\n logger.info(\"Training the ALS model dataset \" + str(i))\n self.als = ALS(maxIter=5, regParam=0.01, userCol=\"UserId\", itemCol=\"GameId\", ratingCol=\"Userscore\",\n coldStartStrategy=\"drop\")\n self.model[i] = self.als.fit(self.df[i])\n logger.info(\"ALS model built!\")", "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n w = np.linalg.solve(a,b)\n loss =compute_loss_LS(y,tx,w)\n return loss, w", "def fit(self, X, y, max_iter=MAX_ITER):\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n X = self.normalize_data(X)\n X = self.add_bias(X)\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = Fista(self, self.lambda_1)\n w = lasso.fit(xk=W[:, t], A=X[t], b=y[t], ind=self.groups,\n max_iter=max_iter)\n W[:, t] = w\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def train(self):\n\n # Create random sample of size self.n\n inst_set = []\n while len(inst_set) < self.n:\n for inst in self.training_instances:\n if np.random.binomial(1, 0.5) == 1 and len(inst_set) < self.n:\n inst_set.append(inst)\n\n if len(inst_set) == self.n:\n break\n\n fvs, labels = TRIMLearner.get_fv_matrix_and_labels(inst_set)\n\n # Calculate initial theta\n w, b = self._minimize_loss(fvs, labels)\n\n old_loss = -1\n loss = 0\n while loss != old_loss:\n if self.verbose:\n print('Current loss:', loss)\n\n # Calculate minimal set\n loss_vector = fvs.dot(w) + b\n loss_vector -= labels\n loss_vector = list(map(lambda x: x ** 2, loss_vector))\n\n loss_tuples = []\n for i in range(len(loss_vector)):\n loss_tuples.append((loss_vector[i], inst_set[i]))\n loss_tuples.sort(key=lambda x: x[0]) # sort using only first elem\n\n inst_set = list(map(lambda tup: tup[1], loss_tuples[:self.n]))\n\n # Minimize loss\n fvs, labels = TRIMLearner.get_fv_matrix_and_labels(inst_set)\n w, b = self._minimize_loss(fvs, 
labels)\n\n old_loss = loss\n loss = self._calc_loss(fvs, labels, w, b)\n\n self.w = w\n self.b = b", "def fit(self, X, y):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n cost_function = 0\n start = time.time()\n for t in range(n_tasks):\n #print('Training task {} with group lasso'.format(t))\n fista = Fista(self, self.lambda_1)\n w_opt = fista.fit(W[:, t], X[t], y[t], self.groups,\n max_iter=self.max_iter)\n W[:, t] = w_opt\n cost_function += self.cost(X[t], y[t], W[:, t])\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def train(self, iterations = 100):\n arguments = ()\n print(\"training...\")\n results = optimize.minimize(self.CostFunction,x0 = self.Thetas, args = arguments, options = {'disp':True, 'maxiter': iterations}, method = \"L-BFGS-B\", jac = True)\n self.Thetas = results['x']\n FinalCost, _ = self.CostFunction(self.Thetas)\n print(\"successfully trained the model\") \n print(\"Final Cost for this model is:\", FinalCost)", "def linear(self, verbose=0):\n\n # Output linear regression summary with coefficients and p-values\n # if desired\n if verbose != 0:\n model = sm.OLS(self.y_train, sm.add_constant(self.X_train)).fit()\n print(model.summary())\n\n linear_regressor = LinearRegression(fit_intercept=True, normalize=False,\n copy_X=True)\n linear_score = np.mean(cross_val_score(\n estimator=linear_regressor, X=self.X_train, y=self.y_train,\n cv=5, scoring=self.scorer))\n print('Linear score: ' + str(linear_score))\n return linear_regressor", "def train_SBL(model: DeepMoD,\n data: torch.Tensor,\n target: torch.Tensor,\n optimizer,\n extra_params, \n sparsity_scheduler,\n split = 0.8,\n exp_ID: str = None,\n log_dir: str = None,\n max_iterations: int = 10000,\n write_iterations: int = 25,\n **convergence_kwargs) -> None:\n logger = Logger(exp_ID, log_dir)\n sparsity_scheduler.path = logger.log_dir # write checkpoint to same folder as tb output.\n \n t, a, l = extra_params\n \n # Splitting data, assumes data is already randomized\n n_train = int(split * data.shape[0])\n n_test = data.shape[0] - n_train\n data_train, data_test = torch.split(data, [n_train, n_test], dim=0)\n target_train, target_test = torch.split(target, [n_train, n_test], dim=0)\n \n M = 12\n N = data_train.shape[0]\n threshold = 1e4\n # Training\n convergence = Convergence(**convergence_kwargs)\n for iteration in torch.arange(0, max_iterations):\n # ================== Training Model ============================\n prediction, time_derivs, thetas = model(data_train)\n \n tau_ = torch.exp(t)\n alpha_ = torch.min(torch.exp(a), torch.tensor(1e8, dtype=torch.float32))\n lambda_ = torch.min(torch.exp(l), torch.tensor(2e4, dtype=torch.float32))\n \n y = time_derivs[0]\n X = thetas[0] / torch.norm(thetas[0], dim=0, keepdim=True)\n \n p_MSE = N / 2 * (tau_ * torch.mean((prediction - target_train)**2, dim=0) - t + np.log(2*np.pi))\n \n A = torch.diag(lambda_) + alpha_ * X.T @ X\n mn = (lambda_ < threshold)[:, None] * (alpha_ * torch.inverse(A) @ X.T @ y)\n E = alpha_ * torch.sum((y - X @ mn)**2) + mn.T @ torch.diag(lambda_) @ mn\n p_reg = 1/2 * (E + torch.sum(torch.log(torch.diag(A)[lambda_ < threshold])) - (torch.sum(l[lambda_ < threshold]) + N * a) - N * np.log(2*np.pi))\n\n MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output\n Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)\n for dt, theta, coeff_vector in zip(time_derivs, thetas, 
model.constraint_coeffs(scaled=False, sparse=True))])\n loss = torch.sum(p_MSE + p_reg)\n\n # Optimizer step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if iteration % write_iterations == 0:\n # ================== Validation costs ================\n with torch.no_grad():\n prediction_test = model.func_approx(data_test)[0]\n MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output\n \n # ====================== Logging =======================\n _ = model.sparse_estimator(thetas, time_derivs) # calculating estimator coeffs but not setting mask\n logger(iteration, \n loss, MSE, Reg,\n model.constraint_coeffs(sparse=True, scaled=True), \n model.constraint_coeffs(sparse=True, scaled=False),\n model.estimator_coeffs(),\n MSE_test=MSE_test,\n p_MSE = p_MSE,\n p_reg = p_reg,\n tau = tau_,\n alpha=alpha_,\n lambda_=lambda_,\n mn=mn)\n\n # ================== Sparsity update =============\n # Updating sparsity \n update_sparsity = sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)\n if update_sparsity: \n model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)\n\n # ================= Checking convergence\n l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)))\n converged = convergence(iteration, l1_norm)\n if converged:\n break\n logger.close(model)", "def ModelRegression():\n \n fs=125\n win_len = 10\n win_shift = 2\n \n # load the data file\n data_fls, ref_fls = LoadTroikaDataset()\n targets, features, sigs, subs = [], [], [], []\n for data_fl, ref_fl in (zip(data_fls, ref_fls)):\n \n # load the signal\n sig = LoadTroikaDataFile(data_fl)\n ref = LoadTroikaRefFile(ref_fl)\n ref = np.array([x[0] for x in ref])\n subject_name = os.path.basename(data_fl).split('.')[0] \n start_indxs, end_indxs = get_indxs(sig.shape[1], len(ref), fs, win_len,win_shift)\n for i, s in enumerate(start_indxs):\n start_i = start_indxs[i]\n end_i = end_indxs[i]\n\n ppg = sig[0, start_i:end_i] \n accx = sig[1, start_i:end_i]\n accy = sig[2, start_i:end_i]\n accz = sig[3, start_i:end_i]\n \n #band pass the channels\n ppg = BandpassFilter(ppg)\n accx = BandpassFilter(accx)\n accy = BandpassFilter(accy)\n accz = BandpassFilter(accz)\n \n # creates the features\n feature, ppg, accx, accy, accz = FeatureExtraction(ppg, accx, accy, accz)\n\n sigs.append([ppg, accx, accy, accz])\n targets.append(ref[i])\n features.append(feature)\n subs.append(subject_name)\n \n targets = np.array(targets)\n features = np.array(features)\n \n # set a Random Forest Regressor model\n #classifier = RandomForestClassifier(n_estimators=100,\n # max_depth=10,\n # random_state=42,\n # class_weight='balanced')\n \n regression = RandomForestRegressor(n_estimators=200,max_depth=10)\n \n lf = KFold(n_splits=5)\n splits = lf.split(features,targets,subs)\n \n # split the data and fit the model\n for i, (train_idx, test_idx) in enumerate(splits):\n X_train, y_train = features[train_idx], targets[train_idx]\n X_test, y_test = features[test_idx], targets[test_idx]\n regression.fit(X_train, y_train)\n \n return regression", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma):\r\n w_list = [initial_w]\r\n loss_list = []\r\n w = initial_w\r\n for n_iter in range(max_iters):\r\n for y_n,tx_n in batch_iter(y, tx, 1):\r\n gradient = compute_stoch_gradient(y_n, tx_n, w)\r\n w = w - gamma * gradient\r\n loss = compute_loss_MSE(y_n, tx_n, w)\r\n w_list.append(w)\r\n loss_list.append(loss)\r\n return w_list[-1], loss_list[-1]", "def 
train():\r\n print('Loading and compiling models...')\r\n model_systole = get_model()\r\n model_diastole = get_model()\r\n\r\n # load the preprocessed data with the heart cut-out\r\n print('Loading data...')\r\n X_train, scaling_train, ids_train, y_train = load_train_data()\r\n X_test, scaling_test, ids_test, y_test = load_test_data()\r\n\r\n nb_iter = 200 # a higher number seems to give rise to overfitting\r\n epochs_per_iter = 3 # reduces overfitting\r\n batch_size = 32 # not tuned - potential improvement\r\n calc_crps = 2 # calculate CRPS every n-th iteration (set to 0 if CRPS estimation is not needed)\r\n\r\n # remember min val. losses (best iterations), used as sigmas for submission\r\n min_val_loss_systole = sys.float_info.max\r\n min_val_loss_diastole = sys.float_info.max\r\n\r\n print('-'*50)\r\n print('Training...')\r\n print('-'*50)\r\n\r\n for i in range(nb_iter):\r\n print('-'*50)\r\n print('Iteration {0}/{1}'.format(i + 1, nb_iter))\r\n print('-'*50)\r\n\r\n # augment data to make up for low number of samples\r\n print('Augmenting images - rotations')\r\n X_train_aug = rotation_augmentation(X_train, 15)\r\n print('Augmenting images - shifts')\r\n X_train_aug = shift_augmentation(X_train_aug, 0.1, 0.1)\r\n\r\n print('Fitting systole model...')\r\n hist_systole = model_systole.fit([X_train_aug, scaling_train], y_train[:, 0], shuffle=True, nb_epoch=epochs_per_iter,\r\n batch_size=batch_size, validation_data=([X_test, scaling_test], y_test[:, 0]))\r\n\r\n print('Fitting diastole model...')\r\n hist_diastole = model_diastole.fit([X_train_aug, scaling_train], y_train[:, 1], shuffle=True, nb_epoch=epochs_per_iter,\r\n batch_size=batch_size, validation_data=([X_test, scaling_test], y_test[:, 1]))\r\n\r\n # sigmas for predicted data, actually loss function values (RMSE)\r\n loss_systole = hist_systole.history['loss'][-1]\r\n loss_diastole = hist_diastole.history['loss'][-1]\r\n val_loss_systole = hist_systole.history['val_loss'][-1]\r\n val_loss_diastole = hist_diastole.history['val_loss'][-1]\r\n\r\n if calc_crps > 0 and i % calc_crps == 0:\r\n print('Evaluating CRPS...')\r\n pred_systole = model_systole.predict([X_train, scaling_train], batch_size=batch_size, verbose=1)\r\n pred_diastole = model_diastole.predict([X_train, scaling_train], batch_size=batch_size, verbose=1)\r\n val_pred_systole = model_systole.predict([X_test, scaling_test], batch_size=batch_size, verbose=1)\r\n val_pred_diastole = model_diastole.predict([X_test, scaling_test], batch_size=batch_size, verbose=1)\r\n\r\n # CDF for train and test data (actually a step function)\r\n cdf_train = real_to_cdf(np.concatenate((y_train[:, 0], y_train[:, 1])))\r\n cdf_test = real_to_cdf(np.concatenate((y_test[:, 0], y_test[:, 1])))\r\n\r\n # CDF for predicted data\r\n cdf_pred_systole = real_to_cdf(pred_systole, loss_systole)\r\n cdf_pred_diastole = real_to_cdf(pred_diastole, loss_diastole)\r\n cdf_val_pred_systole = real_to_cdf(val_pred_systole, val_loss_systole)\r\n cdf_val_pred_diastole = real_to_cdf(val_pred_diastole, val_loss_diastole)\r\n\r\n # evaluate CRPS on training data\r\n crps_train = crps(cdf_train, np.concatenate((cdf_pred_systole, cdf_pred_diastole)))\r\n print('CRPS(train) = {0}'.format(crps_train))\r\n\r\n # evaluate CRPS on test data\r\n crps_test = crps(cdf_test, np.concatenate((cdf_val_pred_systole, cdf_val_pred_diastole)))\r\n print('CRPS(test) = {0}'.format(crps_test))\r\n\r\n print('Saving weights...')\r\n # save weights so they can be loaded later\r\n model_systole.save_weights('weights_systole.hdf5', 
overwrite=True)\r\n model_diastole.save_weights('weights_diastole.hdf5', overwrite=True)\r\n\r\n # for best (lowest) val losses, save weights\r\n if val_loss_systole < min_val_loss_systole:\r\n min_val_loss_systole = val_loss_systole\r\n model_systole.save_weights('weights_systole_best.hdf5', overwrite=True)\r\n\r\n if val_loss_diastole < min_val_loss_diastole:\r\n min_val_loss_diastole = val_loss_diastole\r\n model_diastole.save_weights('weights_diastole_best.hdf5', overwrite=True)\r\n\r\n # save best (lowest) val losses in file (to be later used for generating submission)\r\n with open('val_loss.txt', mode='w+') as f:\r\n f.write(str(min_val_loss_systole))\r\n f.write('\\n')\r\n f.write(str(min_val_loss_diastole))", "def fit(self, X, y, X_validate, y_validate):\n \n iterate = 800\n \n self.SGD_theta_list = [0]*len(X[0])\n self.SGD_bias = 0\n\n SGD_cost_history = []\n SGD_validate_cost_history = []\n\n for i in range(iterate):\n if(i%100==0):\n print(i,\" iterations\")\n selection = random.randint(0, len(X)-1) #selecting one random row for SGD\n temp_X = []\n temp_X.append(X[selection])\n temp_y = []\n temp_y.append(y[selection])\n self.SGD_bias, self.SGD_theta_list = self.update_thetas(np.array(temp_X), np.array(temp_y), self.SGD_theta_list, self.SGD_bias,self.training_rate)\n SGD_cost = self.cost_function(X, y, self.SGD_theta_list, self.SGD_bias)\n SGD_cost_history.append(SGD_cost)\n SGD_validate_cost = self.cost_function(X_validate, y_validate,self.SGD_theta_list, self.SGD_bias)\n SGD_validate_cost_history.append(SGD_validate_cost)\n\n self.FINAL_SGD_TRAIN_LOSS.append(SGD_cost_history[-1])\n self.FINAL_SGD_VALIDATE_LOSS.append(SGD_validate_cost_history[-1])\n\n plt.plot(list(range(iterate)), SGD_cost_history)\n plt.plot(list(range(iterate)), SGD_validate_cost_history)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Loss SGD\")\n plt.show()\n \n \n self.BGD_theta_list = [0]*len(X[0])\n self.BGD_bias = 0\n\n BGD_cost_history = []\n BGD_validate_cost_history = []\n\n for i in range(iterate):\n if(i%100==0):\n print(i,\" iterations\")\n selection = random.randint(0, len(X)-1)\n \n self.BGD_bias, self.BGD_theta_list = self.update_thetas(X, y, self.BGD_theta_list, self.BGD_bias,self.training_rate)\n\n BGD_cost = self.cost_function(X, y, self.BGD_theta_list, self.BGD_bias)\n BGD_cost_history.append(BGD_cost)\n BGD_validate_cost = self.cost_function(X_validate, y_validate,self.BGD_theta_list, self.BGD_bias)\n BGD_validate_cost_history.append(BGD_validate_cost)\n\n self.FINAL_BGD_TRAIN_LOSS.append(BGD_cost_history[-1])\n self.FINAL_BGD_VALIDATE_LOSS.append(BGD_validate_cost_history[-1])\n\n plt.plot(list(range(iterate)), BGD_cost_history)\n plt.plot(list(range(iterate)), BGD_validate_cost_history)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Loss BGD\")\n plt.show()\n\n print(\"FINAL_SGD_TRAIN_LOSS\\n\",self.FINAL_SGD_TRAIN_LOSS)\n print(\"FINAL_SGD_VALIDATE_LOSS\\n\",self.FINAL_SGD_VALIDATE_LOSS)\n print(\"FINAL_BGD_TRAIN_LOSS\\n\",self.FINAL_BGD_TRAIN_LOSS)\n print(\"FINAL_BGD_VALIDATE_LOSS\\n\",self.FINAL_BGD_VALIDATE_LOSS)\n\n \n return self", "def train(self, obs, fcst):\n D = obs.shape[0]\n LT = obs.shape[1]\n L = obs.shape[2]\n w1 = np.zeros(LT)\n w2 = np.zeros(LT)\n l = 0\n o = obs[:, :, :]\n f = fcst[:, :, :]\n bias = o - f\n\n model = sklearn.linear_model.LinearRegression()\n\n for lt in range(LT):\n day = int(np.floor(lt / 24.0)) + 1\n p = np.zeros([(D-day)*L, 2])\n a = np.zeros([(D-day)*L])\n for l in range(L):\n I = range(l*(D-day), (l+1)*(D-day))\n p[I, 0] = bias[day:, 0, l]\n p[I, 1] 
= bias[0:-day, lt, l]\n a[I] = bias[day:, lt, l]\n I = np.where(np.isnan(np.min(p, axis=1) + a)==0)[0]\n model.fit(p[I, :], a[I])\n w1[lt] = model.coef_[0]\n w2[lt] = model.coef_[1]\n\n self.weight_recent = w1\n self.weight_yesterday = w2\n return w1, w2", "def least_squares_SGD(y, tx, initial_w, batch_size, max_iters, gamma):\n\n num_batches = int(y.shape[0] / batch_size)\n # Define parameters to store w and loss\n ws = [initial_w]\n losses = []\n w = initial_w\n n = -1\n for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size, max_iters):\n n = n + 1\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute gradient and loss\n # ***************************************************\n grad = compute_stoch_gradient(minibatch_y, minibatch_tx, w)\n loss = compute_loss(minibatch_y, minibatch_tx, w)\n w = w - gamma * (grad / batch_size)\n # store w and loss\n \n ws.append(w)\n losses.append(loss)\n '''print(\"Stochastic Gradient Descent({bi}/{ti}): loss={l}\".format(\n bi=n, ti=max_iters, l=loss, w0=w[0], w1=w[1]))'''\n\n return w, loss", "def linear_regression(features, values):\n clf = SGDRegressor(n_iter=100)\n clf.fit(features,values)\n print(clf.score(features,values))\n intercept = clf.intercept_ \n params = clf.coef_\n \n return intercept, params", "def test_gls_vs_ols_two_ints_ols():\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=11, gain=5, readnoise=1, nints=2)\n ramp = np.asarray([x * 100 for x in range(11)])\n model1.data[0, :, 50, 50] = ramp\n model1.data[1, :, 50, 50] = ramp * 2\n slopes = ramp_fit(model1, 1024 * 30000., True, rnModel, gain, 'OLS', 'optimal', 'none')\n np.testing.assert_allclose(slopes[0].data[50, 50], 150.0, 1e-6)\n slopes_gls = ramp_fit(model1, 1024 * 30000., True, rnModel, gain, 'GLS', 'optimal', 'none')\n np.testing.assert_allclose(slopes_gls[0].data[50, 50], 150.0, 1e-6)", "def linearRegression(self, xtr, ytr, xte, yte):\n LEARNING_RATE = 0.5\n with self.graph.as_default() as graph:\n with tf.name_scope('training'):\n with tf.name_scope('loss'):\n train_loss = tf.reduce_mean(\n tf.square(self.y_train - self.y_placeholder))\n with tf.name_scope('optimizer'):\n optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)\n train = optimizer.minimize(train_loss) \n # test loss may be different\n with tf.name_scope('test'):\n with tf.name_scope('loss'):\n test_loss = tf.reduce_mean(\n tf.square(self.y_test - self.y_placeholder))\n with tf.Session() as sess:\n # Save the variables to disk.\n model_dir = \"./ckpt/\"\n builder = tf.saved_model.builder.SavedModelBuilder(model_dir)\n\n # Initialize variables\n sess.run(tf.global_variables_initializer())\n TRAIN_STEPS = 201\n\n for step in range(TRAIN_STEPS):\n sess.run([train], \n feed_dict={self.x_placeholder: xtr, \n self.y_placeholder: ytr})\n if step % 20 == 0:\n test_loss_val = sess.run([test_loss],\n feed_dict={self.x_placeholder: xte, \n self.y_placeholder: yte})\n print('step {}, test loss is {}'.format(\n step, test_loss_val))\n\n # Final training results\n a = sess.run(self.a)\n b = sess.run(self.b)\n # Draw result\n minx=np.min(np.concatenate((xtr,xte)))\n maxx=np.max(np.concatenate((xtr,xte)))\n xref=np.linspace(minx,maxx,100)\n plt.figure(0)\n plt.plot(xref, a*xref+b, 'r.')\n plt.plot(xtr, ytr, 'b.')\n plt.plot(xte, yte, 'g.')\n plt.show()", "def fit(self, X, y, max_iter=MAX_ITER):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n 
cost_function = 0\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = Lasso(alpha=self.lambda_1, positive=self.positive, max_iter=max_iter)\n lasso.fit(X[t], y[t])\n W[:, t] = lasso.coef_\n cost_function += np.linalg.norm(np.dot(X[t], W[:, t]) - y[t]) \\\n + sum(abs(W[:, t]))\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = stochastic_gradient_descent(y, tx, initial_w, 1, max_iters, gamma, loss_f = model_linear.compute_loss, grad_f = model_linear.compute_gradient, debug = debug)\n return get_last_ans(ws, losses)", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n # Define parameters to store w and loss\n w = initial_w\n loss = compute_gradient(y, tx, initial_w)\n for it, (yb, txb) in enumerate(random_batches(y, tx, max_iters)):\n # compute 1 SGD and the loss\n grad = compute_gradient(np.array([yb]), txb[np.newaxis, :], w)\n # update w\n w -= gamma * grad\n if it % (max_iters//10) == 0:\n print(compute_cost(y, tx, w))\n return w, compute_cost(y, tx, w)", "def run_linear_regression(data_x, data_y):\n iteration_s = 100\n alpha = 0.0001550\n\n no_features = data_x.shape[1]\n len_data = data_x.shape[0]\n print(\"no_feature :, len_data: \", no_features , len_data)\n #intinilize the the\n theta = np.zeros(no_features)\n #iterations how many time do\n for i in range(0,iteration_s):\n theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)\n error = sum_of_square_error(data_x, data_y, len_data, theta)\n print(\"at iteration %d - Error is %.5f \" % (i+1, error))\n print(\"theta shape\", theta.shape)\n return theta", "def bias_var(x, y, z, first_poly = 4, complexity = 10, N = 100, method = 'OLS', seed = 42, lam = 0, train = 0.7, folds = 5):\n\n bias = np.zeros(complexity + 1)\n variance = np.zeros(complexity + 1)\n z_real = FrankeFunction(x, y)\n\n complx = np.arange(first_poly, first_poly + complexity + 1, 1)\n\n for i in range(complexity + 1):\n print(i)\n model = regression(x, y, z, k = first_poly + i, split = True, train = train, seed = seed)\n\n _, _, _, z_real_test = model.train_test(X = model.X_full, z = np.ravel(z_real), train = train, seed = seed)\n\n counter = 0\n z_tildes = np.zeros((np.size(z_real_test), N))\n for j in range(N):\n\n z_new = FrankeFunction(x, y) + np.random.normal(0, 1, size = x.shape)\n _, _, z_train, _ = model.train_test(X = model.X_full, z = np.ravel(z_new), train = train)\n if method == 'OLS':\n beta = model.OLS(z = z_train)\n elif method == 'Ridge':\n beta = model.Ridge(lam = lam, z = z_train)\n elif method == 'Lasso':\n beta = model.Lasso(lam = lam, z = z_train)\n\n z_tilde = model.z_tilde(beta, X = model.X_test)\n z_tildes[:, j] = np.ravel(z_tilde)\n\n\n bias[i] = np.mean((np.ravel(z_real_test).reshape(-1, 1) - np.mean(z_tildes, axis = 1, keepdims = True))**2)\n variance[i] = np.mean(np.var(z_tildes, axis = 1, keepdims = True))\n\n plt.title(method + ' with N = ' + str(N) + ' times pr complexity')\n plt.plot(complx, bias, 'go--', label = 'Bias', color = 'blue')\n plt.plot(complx, variance, 'go--', label = 'Variance', color = 'red')\n #plt.ylim([np.min(errors_R2[2]*1.2), np.max(errors_R2[0]*1.2)])\n plt.legend()\n plt.xlabel('Polynomial maximum order', fontsize = 14)\n plt.ylabel('Bias/variance', fontsize = 14)\n plt.tight_layout()\n plt.savefig(results_dir + 'bias_variance' + method + '.png')\n\n plt.show()", "def train_model(self,\n env: pendulum.Pendulum,\n 
rollouts: int = 100,\n steps: int = 1,\n learning_rate: float = 0.001,\n tests: int = 10,\n loss_function=rmse_loss,\n ) -> Tuple[pd.DataFrame, pd.DataFrame]:\n optimizer = tf.train.AdamOptimizer(learning_rate)\n losses = []\n test_losses = []\n rollout_counter = 0\n\n try:\n # call generator function\n for rollout in pendulum.get_state_generator(steps):\n\n # break loop if enough rollouts have been made\n if rollout_counter >= rollouts:\n break\n rollout_counter += 1\n feature, target = rollout\n\n # calculate loss in GradientTape\n with tf.GradientTape() as tape:\n loss_value = loss_function(\n self.model,\n feature,\n target\n )\n # extract the gradients\n grads = tape.gradient(\n loss_value,\n self.model.variables\n )\n optimizer.apply_gradients(\n zip(grads, self.model.variables),\n global_step=tf.train.get_or_create_global_step()\n )\n\n loss = loss_value.numpy()\n losses.append(np.array([rollout, loss]))\n\n print(f\"rollout {rollout_counter}/{rollouts}, \"\n f\"loss: {loss}\\n\")\n\n self.save_model()\n\n # save the losses in a df for easy visualization\n losses_df = pd.DataFrame(\n losses,\n columns=[\"rollout\", \"mean_loss\"]\n )\n\n # run tests\n test_run = 0\n for data, target in pendulum.get_state_generator(1):\n if test_run > tests:\n break\n # calc the loss value\n loss_value = loss_function(\n self.model,\n data,\n target\n )\n # append loss to the list and keep last iteration\n test_losses.append(np.array([\n # reuse training variable for plotting\n test_run,\n loss_value.numpy()\n ]))\n test_run += 1\n\n # create dataframe out of test losses\n test_losses_df = pd.DataFrame(\n test_losses,\n columns=[\"test\", \"test_loss\"]\n )\n except KeyboardInterrupt:\n self.save_model()\n\n return losses_df, test_losses_df", "def test_linear_fit_2d_model_set(self):\n\n init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)\n x = np.arange(10)\n y = np.arange(10)\n z_expected = init_model(x, y, model_set_axis=False)\n assert z_expected.shape == (2, 10)\n\n # Add a bit of random noise\n with NumpyRNGContext(_RANDOM_SEED):\n z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, z)\n assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected, rtol=1e-1)", "def linear_regression(x, t, basis, reg_lambda=0, degree=0):\n\n # TO DO:: Complete the design_matrix function.\n # e.g. phi = design_matrix(x,basis, degree)\n \n phi=design_matrix(x,basis,degree)\n phi_cross=np.linalg.pinv(phi)\n #t_tranpose=t.T\n # TO DO:: Compute coefficients using phi matrix\n if(reg_lambda==0):\n w=phi_cross.dot(t)\n if(reg_lambda!=0):\n # print(\"Inside lambda if: \")\n n_col=phi.shape[1]\n #r=phi.T.dot(phi) + reg_lambda * np.identity(n_col)\n r=reg_lambda*np.identity(n_col)+phi.T.dot(phi)\n r=np.linalg.inv(r)\n\t#r=np.linalg.inv(r)\n [email protected]\n w=z@t\n #w = phi_cross.dot(t)\n\n # Measure root mean squared error on training data.\n # Basic algorithim goes as follows:\n # \t1. 
We take Equation 3.12 * 1/n \n # Then we math.sqrt( of the equation obtained in 1.)\n\n # t_est: variable for estimation of targets\n t_est= phi.dot(w)\n \n # variable to calculate the difference between our target and estimate\n # target is the left operand, estimate is right operand\n diff=t-t_est\n \n # Square all the elements\n diff_squared=np.power(diff,2)\n\n # Sum up all the elements of diff_squared, i.e take square of\n # all elements then sum them up\n\n sig_squared=diff_squared.sum()\n\n # multiply by 1/2 as specified in PRML\n\n half_sig_squared=0.5*(sig_squared)\n\n # Divide by population size and square root\n population_size= t.shape[0]\n\n rmse_bforesqrt=half_sig_squared/population_size\n\n train_err = np.sqrt(rmse_bforesqrt)\n\n return (w, train_err)", "def regression(df_tot, fasit_key, chosen, max_p):\n\n with np.errstate(divide='ignore'):\n # First regression\n first_model = sm.OLS(df_tot[fasit_key], df_tot[chosen])\n\n # Initializing loop\n results = first_model.fit()\n chosen_p = chosen.copy()\n ant_break = 0\n\n # Looping through until final model is chosen\n while max(results.pvalues) > max_p or len(results.pvalues) >= max_final_numb_kandidater:\n if len(results.pvalues) <= min_kandidater:\n ant_break = 1 # count\n break\n chosen_p.remove(results.pvalues.idxmax()) # updating the chosen list\n\n with np.errstate(divide='ignore'):\n results = sm.OLS(df_tot[fasit_key], df_tot[chosen_p]).fit() # regression\n\n return results, chosen_p, ant_break", "def mlr(df, exp_vars, resp_var, \n method='ols', \n fit_intercept=True,\n kcv=3,\n normalize=False):\n from sklearn import cross_validation\n from sklearn.linear_model import LinearRegression, RidgeCV\n from sklearn.linear_model import LassoCV, ElasticNetCV\n from sklearn.metrics import r2_score\n from sklearn.utils import resample\n import matplotlib.pyplot as plt\n import seaborn as sn\n import pandas as pd\n import numpy as np\n \n # Separate data\n X = df[exp_vars]\n y = df[resp_var]\n \n # Setup model\n if method == 'ols':\n model = LinearRegression(fit_intercept=fit_intercept, \n normalize=normalize)\n elif method == 'lasso':\n model = LassoCV(fit_intercept=fit_intercept, \n normalize=normalize, \n max_iter=10000,\n cv=kcv)\n elif method == 'ridge':\n model = RidgeCV(fit_intercept=fit_intercept, \n normalize=normalize, \n alphas=np.logspace(-10, 10, 21))\n elif method == 'el-net':\n model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],\n fit_intercept=fit_intercept, \n normalize=normalize,\n cv=kcv)\n else:\n raise ValueError('\"method\" parameter must be in [\"ols\", \"lasso\", \"ridge\", \"el-net\"]')\n \n # k-fold cross validation\n #cv_scores = cross_validation.cross_val_score(model, X, y, cv=kcv, scoring='r2')\n #print 'Mean r2 from %s-fold CV: %.3f\\n' % (kcv, cv_scores.mean())\n \n # Train model on full dataset\n model.fit(X, y)\n \n # Get y-hat\n y_pred = model.predict(X)\n \n # r2 based on calibration data\n r2 = r2_score(y, y_pred)\n print 'r2:', r2\n print ''\n \n # Summary of model\n print model\n print ''\n \n if method == 'lasso':\n print 'Lasso alpha:', model.alpha_\n print ''\n elif method == 'ridge':\n print 'Ridge alpha:', model.alpha_\n print ''\n elif method == 'el-net':\n print 'Elastic net alpha:', model.alpha_ \n print 'Elastic net L1 ratio:', model.l1_ratio_ \n print ''\n else: # OLS\n pass\n \n # Plot\n fig = plt.figure(figsize=(15,15))\n \n # Paired points for each site\n ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)\n ax1.plot(range(0, len(X.index)), y, 'ro', label='Observed')\n 
ax1.plot(range(0, len(X.index)), y_pred, 'b^', label='Modelled')\n \n ax1.set_xticks(range(0, len(X.index)))\n ax1.set_xticklabels(X.index, rotation=90, fontsize=12)\n ax1.set_xlim(0, len(X.index)-1)\n \n ax1.set_xlabel('Site code', fontsize=16)\n ax1.set_ylabel(resp_var)\n ax1.set_title('Points paired for each location', fontsize=20)\n ax1.legend(loc='best', fontsize=16)\n \n # Modelled versus observed\n ax2 = plt.subplot2grid((2,2), (1,0), colspan=1)\n ax2.plot(y, y_pred, 'ro')\n ax2.set_xlabel('Observed', fontsize=16)\n ax2.set_ylabel('Modelled', fontsize=16)\n ax2.set_title('Modelled versus observed', fontsize=20)\n \n # Hist of residuals\n ax3 = plt.subplot2grid((2,2), (1,1), colspan=1)\n sn.distplot(y - y_pred, kde=True, ax=ax3)\n ax3.set_title('Histogram of residuals', fontsize=20)\n \n plt.tight_layout()\n \n # Get param estimates\n params = pd.Series(model.coef_, index=X.columns)\n\n # Estimate confidence using bootstrap\n # i.e. what is the std. dev. of the estimates for each parameter\n # based on 1000 resamplings\n err = np.std([model.fit(*resample(X, y)).coef_ for i in range(1000)], \n axis=0)\n\n # Build df\n res = pd.DataFrame({'effect':params,\n 'error':2*err})\n\n # Rough indicator of significance: are the estimated values more than\n # 2 std. devs. from 0 (~95% CI?). NB: this assumnes the \"marginal posterior\" \n # is normal, which I haven't tested for and which quite possibly isn't true\n # - use with care! \n res['signif'] = np.abs(res['effect']) > res['error']\n \n return res", "def _do_first_regression(self, Y, X, W):\n\n\t\twls_model = sm.WLS(Y, X, weights = 1.0 / W)\n\t\tresults = wls_model.fit()\n\t\tb, a = results.params # convention from paper\n\t\treturn (a,b)", "def train(loss_function='js', epoch=10, batch_size=512, phi=0.9, alpha=10):\n\n if loss_function =='js':\n model = MLPJSD()\n\n elif loss_function =='wd':\n model = MLPWD()\n\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n\n\n for epoch in range(epoch):\n\n optimizer.zero_grad()\n\n x = samplers.distribution1(0, batch_size)\n\n for input_x in x:\n input_x = Variable(torch.from_numpy(input_x)).float()\n break\n\n y = samplers.distribution1(phi, batch_size)\n\n for input_y in y:\n input_y = Variable(torch.from_numpy(input_y)).float()\n break\n\n if loss_function == 'js':\n\n loss = loss_js(model, input_x, input_y)\n\n elif loss_function == 'wd':\n\n loss = loss_wd(model, input_x, input_y, alpha)\n\n loss.backward()\n optimizer.step()\n\n loss_print = - loss\n\n if(epoch%50) == 0:\n print('epoch: {}, train loss: {:.6f}'.format(\n epoch, loss_print))\n\n return model, loss_print", "def get_linear_model(params):\n\n ss = StandardScaler()\n lr = ElasticNet(selection='random', random_state=42) # EN\n\n if params['pca']:\n pca = PCA(n_components=params['pca_comps'], whiten=True)\n lr_model = Pipeline(steps=(['scale', ss], ['pca', pca], ['model', lr])) # pipeline\n else:\n lr_model = Pipeline(steps=(['scale', ss], ['model', lr])) # pipeline\n\n lr_model_params = {\n 'model__alpha': loguniform(1e-1, 1e3),\n 'model__l1_ratio': uniform(0.1, .9)\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV\n linear_model = RandomizedSearchCV(lr_model, lr_model_params, n_iter=500, cv=5)\n\n return clone(linear_model)", "def train_lr(x, y, lamb):\n \n # TODO: implement the function.\n # initialize parameters w and b\n w = tf.Variable([0.0])\n b = tf.Variable(0.0)\n\n # set an optimizer\n # please check the documentation of tf.keras.optimizers.SGD\n optim = 
tf.keras.optimizers.SGD(learning_rate = 0.001)\n\n # loop to optimize w and b \n for i in range(1000):\n\n with tf.GradientTape() as gt:\n gt.watch([w, b])\n y_hat = regression_func(x, w, b)\n loss = loss_func(y, y_hat)\n\n dw, db = gt.gradient(loss, [w,b])\n\n del gt\n\n optim.apply_gradients(zip([dw,db],[w,b]))\n\n\n return w, b", "def create_model(X, tmode, nmode1, nmode2, nmode, window, rank, tlength, seasonp, horizon, f_window):\n model = smooth_tfactor(X, tmode, nmode1, nmode2, nmode, window, rank, tlength, seasonp, horizon, f_window)\n opt = torch.optim.SGD(model.parameters(),lr=0.001)\n return model, opt", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def gls(times,signal, f0=None, fn=None, df=None, errors=None, wexp=2):\n T = times.ptp()\n n = len(times)\n if errors is None:\n errors = np.ones(n)\n maxstep = int((fn-f0)/df+1)\n \n #-- initialize parameters\n f1 = np.zeros(maxstep) #-- frequency\n s1 = np.zeros(maxstep) #-- power\n p1 = np.zeros(maxstep) #-- window\n l1 = np.zeros(maxstep) #-- power LS\n \n #-- calculate generalized least squares\n pyGLS.gls(times+0.,signal+0.,errors,f0,fn,df,wexp,f1,s1,p1,l1)\n return f1,s1", "def lin_reg():\n \n year = 2013\n \n # import temperature and ridership data\n data_array = process_data()\n \n # select month, day, hour, temperature, precipitation, and snow data from data_array\n X = data_array[:,[1,2,3]]\n # select ridership data from data_array\n Y = data_array[:,4]\n\n # make array vertical so that scikit-learn can process it\n X = X.reshape(X.shape[0], -1)\n Y = Y.reshape(Y.shape[0], -1)\n\n # splits data into training and testing bits\n X_train, X_test, y_train, y_test = train_test_split(X, Y, train_size=0.5)\n \n # sets degree of polynomial regression\n # in testing, anything greater than 7 will give a MemoryError\n degrees = 7\n\n # initalize scikit-learn model\n model = make_pipeline(PolynomialFeatures(degrees), Ridge())\n\n # fits a model to training data\n print 'fitting model...'\n model.fit(X_train, y_train)\n\n # scores model\n print \"Year %d, %d degree polynomial regression\" % (year, degrees)\n print \"Train R^2 %f\"%model.score(X_train, y_train)\n print \"Test R^2 %f\"%model.score(X_test, y_test)\n\n # pickles and saves model\n pickle.dump(model, open('LargeDataStorage/mlModelNoWeather', 'wb'))\n pass", "def generative_model(X, Y, Xs_test, Ys_test):\n initial_sensor_loc = np.random.randn(7, 2) * 100\n estimated_sensor_loc = find_mle_by_grad_descent_part_e(\n initial_sensor_loc, Y, X, lr=0.001, num_iters=1000)\n\n mses = []\n for i, X_test in enumerate(Xs_test):\n Y_test = Ys_test[i]\n Y_pred = np.array(\n [get_object_location(estimated_sensor_loc, X_test_single) for X_test_single in X_test])\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test)**2, axis=1)))\n mses.append(mse)\n return mses", "def generative_model(X, Y, Xs_test, Ys_test):\n initial_sensor_loc = np.random.randn(7, 2) * 100\n estimated_sensor_loc = find_mle_by_grad_descent_part_e(\n initial_sensor_loc, Y, X, lr=0.001, num_iters=1000)\n\n mses = []\n for i, X_test in enumerate(Xs_test):\n Y_test = Ys_test[i]\n Y_pred = 
np.array(\n [get_object_location(estimated_sensor_loc, X_test_single) for X_test_single in X_test])\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test)**2, axis=1)))\n mses.append(mse)\n return mses", "def experiment_linear_lp(adv_norm_type, dual_norm_type, baseline_norm_types,\n attack_step_dir):\n module_name = 'train'\n # log_dir = 'runs_linear_%s' % adv_norm_type\n # log_dir = 'runs_linear_postnorm_%s' % adv_norm_type\n log_dir = 'runs_linear_postnorm_randinit_%s' % adv_norm_type\n exclude = '*'\n\n shared_params = get_shared_params(adv_norm_type, dual_norm_type,\n attack_step_dir)\n\n # No 0 regularization coefficient\n reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n # reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10]\n # Between 1e-3 and 1e-1 for d/n=10 the adv robustness drops\n # reg_coeff += [3e-3, 5e-3, 3e-2, 5e-2, 3e-1, 5e-1]\n\n # Model hyper-parameters\n linear_noreg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', 'none'),\n ])\n linear_reg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', ['w_%s' % b for b in baseline_norm_types] +\n ['w_%s' % dual_norm_type]),\n ('reg_coeff', reg_coeff),\n ])\n\n params = []\n\n # cvxpy solution\n # njobs=3*6*1=18\n cvxpy_params = nameit('optim', [\n ('name', 'cvxpy'),\n ('norm', dual_norm_type),\n ('niters', 10000),\n ('lr', 0), # keep cvxpy sol fixed\n ])\n params += [OrderedDict(shared_params+linear_noreg_model_params+cvxpy_params)]\n\n # njobs=3*6*2=36\n # CD with line search doesn't work right, so not including it\n gd_ls = nameit('optim', [\n ('name', 'gd_ls'), # ['gd_ls', 'cd_ls']),\n ('niters', 10000),\n ('bound_step', True),\n ])\n params += [OrderedDict(shared_params+linear_noreg_model_params+gd_ls)]\n\n # Implicit bias with fixed lr\n # GD with fixed lr performs similar to line search, so we don't include them\n # njobs=3*6*11=198\n # gd_fixed_lr = nameit('optim', [\n # ('name', 'gd'),\n # ('niters', 10000),\n # ('lr', [\n # 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3, 1e-2, 3e-2, 1e-1,\n # 3e-1, 1\n # ]),\n # ])\n # params += [OrderedDict(shared_params+linear_noreg_model_params+gd_fixed_lr)]\n\n # njobs=3*6*19=342\n cd_fixed_lr = nameit('optim', [\n ('name', ['cd', 'signgd']),\n ('niters', 10000),\n ('lr', [\n 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3, 1e-2, 3e-2, 1e-1,\n 3e-1, 1, 2, 3, 6, 9, 10, 20, 30, 50\n ]),\n ])\n params += [OrderedDict(shared_params+linear_noreg_model_params+cd_fixed_lr)]\n\n # Explicit regularization with line search\n # njobs=3*6*20*4*2=2880\n explicit_reg = nameit('optim', [\n ('name', 'fista'),\n ('niters', 10000),\n ('bound_step', True),\n ('step_size', [1, 10, 100, 1000]),\n ])\n params += [OrderedDict(shared_params+linear_reg_model_params+explicit_reg)]\n\n # Adversarial training with line search\n # njobs=3*6*5=90\n adv_train_params = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 10000),\n ('bound_step', True),\n ])\n adv_train_params += nameit('optim', nameit('adv_train', [\n ('enable', True),\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10), # niters, 1000\n ('pre_normalize', True),\n ('post_normalize', True),\n ('step_dir', attack_step_dir),\n ('eps_from_cvxpy', True),\n ]))\n adv_train_params = OrderedDict(\n shared_params+linear_noreg_model_params+adv_train_params)\n\n params += [adv_train_params]\n\n return params, log_dir, module_name, exclude", "def fitRateSpectrum(Times, Data, Rates, w, Lnorm='ridge', standardizeData=True, CalcNdof=False, rho=0.5):\n\n \n if Lnorm == 'lasso':\n # Use L1-norm Lasso regression\n try:\n 
from scikits.learn.linear_model import Lasso \n except:\n print 'Error: could NOT import Lasso from scikits.learn.linear_model. Using L2 norm (ridge).'\n Lnorm = 'ridge'\n\n if Lnorm == 'enet':\n # Use L1-L2-mixture norm Lasso regression\n try:\n from scikits.learn.linear_model import ElasticNet\n except:\n print 'Error: could NOT import ElasticNet from scikits.learn.linear_model. Using L2 norm (ridge).'\n Lnorm = 'ridge'\n\n\n if Lnorm == 'lasso':\n\n lasso = Lasso(alpha = w, fit_intercept=False) # assume the data is already \"centered\" -- i.e. no zero rate\n X, Xmean = Xsubmatrix(Rates, Times, standardizeData=standardizeData)\n #print 'X.shape', X.shape, 'Data.shape', Data.shape\n lasso.fit(X, Data, max_iter=1e6, tol=1e-7)\n A = lasso.coef_\n\n # Compute \"residual sum of squares\" (note loss function is different for L1-norm)\n y_pred_lasso = lasso.predict(X)\n diff = y_pred_lasso - Data\n\n\n elif Lnorm == 'enet':\n\n # NOTE: The convention for rho is backwards in scikits.learn, instead of rho we must send (1-rho)\n enet = ElasticNet(alpha = w, rho=(1.-rho), fit_intercept=False) # assume the data is already \"centered\" -- i.e. no zero rate\n X, Xmean = Xsubmatrix(Rates, Times, standardizeData=standardizeData)\n #print 'X.shape', X.shape, 'Data.shape', Data.shape\n #enet.fit(X, Data, max_iter=1e6, tol=1e-7)\n enet.fit(X, Data, max_iter=1e6, tol=1e-3) # for testing\n A = enet.coef_\n\n # Compute \"residual sum of squares\" (note loss function is different for L1-norm)\n y_pred_enet = enet.predict(X)\n diff = y_pred_enet - Data\n\n\n elif Lnorm == 'ridge':\n X, Xmean = Xmatrix(Rates, Times, w, standardizeData=standardizeData )\n Xinv = linalg.pinv(X)\n\n y = np.array( Data.tolist() + [0. for k in Rates] )\n if standardizeData:\n y - y.mean()\n A = np.dot(Xinv, y)\n\n # Compute \"residual sum of squares\" (note loss function is different for L1-norm)\n diff = SumSpectra(A, Rates, Times) - Data\n\n rss = np.dot(diff,diff) # Residual sum of squares\n\n if CalcNdof:\n Xsub, Xmean = Xsubmatrix(Rates, Times, standardizeData=standardizeData)\n XT = np.transpose(Xsub)\n I_XT = np.eye(XT.shape[0])\n I_X = np.eye(Xsub.shape[0])\n Xtemp = np.dot(Xsub, np.linalg.inv(np.dot(XT,Xsub) + w*I_XT))\n ndof = np.trace(I_X - np.dot(Xtemp,XT))\n else:\n ndof = None\n\n return A, rss, ndof", "def Lasso(X0, Y, lam, w=np.array([0]), maxit=100, normalize=2):\n\n # Obtain size of X\n n, d = X0.shape\n X = np.zeros((n, d), dtype=np.complex64)\n Y = Y.reshape(n, 1)\n\n # Create w if none is given\n if w.size != d:\n w = np.zeros((d, 1), dtype=np.complex64)\n w_old = np.zeros((d, 1), dtype=np.complex64)\n\n # First normalize data\n if normalize != 0:\n Mreg = np.zeros((d, 1))\n for i in range(0, d):\n Mreg[i] = 1.0 / (np.linalg.norm(X0[:, i], normalize))\n X[:, i] = Mreg[i] * X0[:, i]\n else:\n X = X0\n\n # Lipschitz constant of gradient of smooth part of loss function\n L = np.linalg.norm(X.T.dot(X), 2)\n\n # Now loop until converged or max iterations\n for iters in range(0, maxit):\n\n # Update w\n z = w + iters / float(iters + 1) * (w - w_old)\n w_old = w\n z = z - X.T.dot(X.dot(z) - Y) / L\n for j in range(d):\n w[j] = np.multiply(np.sign(z[j]), np.max([abs(z[j]) - lam / L, 0]))\n\n # Could put in some sort of break condition based on convergence here.\n\n # Now that we have the sparsity pattern, used least squares.\n biginds = np.where(w != 0)[0]\n if biginds != []: w[biginds] = np.linalg.lstsq(X[:, biginds], Y)[0]\n\n # Finally, reverse the regularization so as to be able to use with raw data\n if normalize != 
0:\n return np.multiply(Mreg, w)\n else:\n return w", "def start_lasso_regression(training_records, output):\n regressor = LassoRegression(iterations=NUM_OF_ITERATIONS, learning_rate=LEARNING_RATE, regularization_strength=LASSO_REGULARIZATION_STRENGTH)\n weights_table, mse_costs, predicted_outputs = regressor.calculate_weights(training_records, output)\n clf = linear_model.Lasso(fit_intercept=False)\n clf.fit(training_records, output)\n print \"Starting gradient descent with {0} iterations, learning rate of {1} and a regularization \" \\\n \"strength of {2}\".format(NUM_OF_ITERATIONS, LEARNING_RATE, LASSO_REGULARIZATION_STRENGTH)\n\n print \"Running...\"\n\n final_weights = [weights_table[-1][i] for i in range(0, NUM_OF_FEATURES+1)]\n print \"After %s iterations of Gradient Descent (our implementation), the final weights are : %s\" % (NUM_OF_ITERATIONS, final_weights)\n\n print \"Using Sklearn's Lasso Regression, the weights are : %s\" % clf.coef_\n return weights_table, mse_costs", "def demo_train(ts_struct_list, frc_model=None, fg_mdl=None, fs_mdl=None, verbose=False,\n return_model=False, rewrite=True):\n\n # Check arguments:\n if fg_mdl is None:\n fg_mdl = frc_class.IdentityGenerator(name=\"Identity generator\", on=False)\n\n if fs_mdl is None:\n fs_mdl = gnt_class.FeatureGeneration() # IdentityModel(name=\"Identity selector\")\n\n if frc_model is None:\n frc_model = frc_class.CustomModel(Lasso, name=\"Lasso\", alpha=0.01)\n\n model = frc_class.PipelineModel(gen_mdl=fg_mdl, sel_mdl=fs_mdl, frc_mdl=frc_model)\n results = []\n res_text = []\n\n for ts in ts_struct_list:\n data = regression_matrix.RegMatrix(ts, x_idx=TS_IDX, y_idx=TS_IDX)\n\n # Create regression matrix\n data.create_matrix(nsteps=N_STEPS, norm_flag=True) # this creates data.Y, data.X and some other fields\n\n # Split data for training and testing\n data.train_test_split(TRAIN_TEST_RATIO)\n\n # train the model. 
This returns trained pipeline and its steps\n model, frc, gen, sel = model.train_model(data.trainX, data.trainY)\n\n selection_res = \"\\n Feature selection results: problem status {}, selected {} from {} \\\\\\\\ \\n\".\\\n format(sel.status, len(sel.selected), sel.n_vars)\n\n frcY, _ = data.forecast(model) # returns forecasted matrix of the same shape as data.Y\n # frcY, idx_frc = data.forecast(model, idx_rows=data.idx_test) # this would return forecasts only for data.testY\n\n data.plot_frc(n_frc=5, n_hist=10, folder=SAVE_DIR) #this saves figures into SAVE_DIR\n\n train_mae = data.mae(idx_rows=data.idx_train, idx_original=data.original_index)\n train_mape = data.mape(idx_rows=data.idx_train, idx_original=data.original_index)\n\n test_mae = data.mae(idx_rows=data.idx_test, idx_original=data.original_index)\n test_mape = data.mape(idx_rows=data.idx_test, idx_original=data.original_index)\n\n index = [ts.data[i].name for i in TS_IDX]\n res1 = pd.DataFrame(train_mae, index=index, columns=[(\"MAE\", \"train\")])\n res2 = pd.DataFrame(train_mape, index=index, columns=[(\"MAPE\", \"train\")])\n res3 = pd.DataFrame(test_mae, index=index, columns=[(\"MAE\", \"test\")])\n res4 = pd.DataFrame(test_mape, index=index, columns=[(\"MAPE\", \"test\")])\n res = pd.concat([res1, res2, res3, res4], axis=1)\n\n configuration_str = \"\\n Time series {} forecasted with {} + '{}' feature generation model and \" \\\n \"'{}' feature selection model \\\\\\\\ \\n\".format(ts.name, frc.name, gen.name, sel.name)\n if verbose:\n print(configuration_str)\n print(selection_res)\n print(res)\n\n results.append(res)\n res_text.append(configuration_str)\n res_text.append(selection_res)\n\n saved_mdl_fname = model.save_model(file_name=FNAME_PREFIX, folder=SAVE_DIR) # saving in not an option yet\n # model = frc_class.PipelineModel().load_model(file_name=fname)\n\n # write results into a latex file\n my_plots.save_to_latex(results, df_names=res_text, folder=SAVE_DIR, rewrite=rewrite)\n print(\"Results saved to folder {}\".format(SAVE_DIR))\n\n if return_model:\n return model, saved_mdl_fname\n\n return saved_mdl_fname", "def least_squares(y, tx, loss_function=rmse):\n w = np.linalg.solve(tx.T @ tx, tx.T @ y)\n loss = loss_function(y, tx, w)\n return w, loss", "def least_squares(y, tx):\r\n w = np.linalg.solve(tx.T@tx,tx.T@y)\r\n loss = compute_loss_MSE(y, tx, w)\r\n return w,loss", "def solver_mll(X, y, alpha=0.1, C=None, S=None, callback=None, positive=False,\n maxiter=1000, tol=1e-4, compute_obj=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False,\n positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n if S is None:\n S = np.zeros((n_features, n_tasks))\n if C is None:\n C = np.ones(n_features)\n else:\n if C.max() <= 0:\n C = np.ones(n_features)\n\n old_theta = C[:, None] * S\n objs = []\n if compute_obj or callback:\n ll = objective(X, y, C, S, alpha)\n objs.append(ll)\n for i in range(maxiter):\n # W = block_diag(X * C[None, None, :], \"csc\")\n # lasso.fit(W, y.flatten())\n # S = lasso.coef_.reshape(n_tasks, n_features).T\n W = X * C[None, None, :]\n for k in range(n_tasks):\n lasso.fit(W[k], y[k])\n S[:, k] = lasso.coef_\n Z = S.T[:, None, :] * X\n Z = Z.reshape(n_tasks * n_samples, n_features)\n lasso_p.fit(Z, y.flatten())\n C = lasso_p.coef_\n theta = C[:, None] * S\n dll = abs(theta - old_theta).max()\n dll /= max(theta.max(), old_theta.max(), 1.)\n old_theta = theta.copy()\n if compute_obj or callback:\n ll = 
objective(X, y, C, S, alpha)\n objs.append(ll)\n if callback:\n callback(theta, obj=ll)\n if dll < tol:\n break\n\n if i == maxiter - 1:\n print(\"**************************************\\n\"\n \"******** WARNING: Stopped early. *****\\n\"\n \"\\n\"\n \"You may want to increase maxiter. Last err: %f\" % dll)\n return C, S, objs", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma, loss_function=mse, gradient=mse_grad):\n w = initial_w\n for iter in range(max_iters):\n # randomly select datapoint\n id = np.random.randint(y.shape[0])\n sample_y, sample_x = y[id], tx[id]\n # compute gradient\n grad = gradient(y, tx, w)\n # update w\n w = w - gamma * grad\n loss = loss_function(y, tx, w)\n return w, loss", "def test_dc_lcsmodel_class():\n\n # Set the problem size.\n n = 1000\n p = 3\n\n # Define the test model\n TM = test.Model1(n,p)\n\n # Note: diff_A/diff_b do not require A/b as an input in this case,\n # but in the more general case they might.\n\n # Check the basic model calculations.\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A = TM.eval_A(theta)\n B = TM.eval_b(theta)\n\n dA_1 = TM.diff_A(A, theta, 0).todense()\n dA_2 = TM.diff_A(A, theta, 1).todense()\n dA_3 = TM.diff_A(A, theta, 2).todense()\n dA_4 = TM.diff_A(A, theta, 3).todense()\n Z = numpy.zeros_like(dA_1)\n \n dB_1 = TM.diff_b(B, theta, 0)\n dB_2 = TM.diff_b(B, theta, 1)\n dB_3 = TM.diff_b(B, theta, 2)\n dB_4 = TM.diff_b(B, theta, 3)\n z = numpy.zeros_like(dB_1)\n \n print \"dA/dtheta_1 check:\", numpy.allclose(dA_1, TM.A1.todense())\n print \"dA/dtheta_2 check:\", numpy.allclose(dA_2, TM.A2.todense())\n print \"dA/dtheta_3 check:\", numpy.allclose(dA_3, Z)\n print \"dA/dtheta_4 check:\", numpy.allclose(dA_4, Z)\n\n print \"db/dtheta_1 check:\", numpy.allclose(dB_1, z)\n print \"db/dtheta_2 check:\", numpy.allclose(dB_2, z)\n print \"db/dtheta_3 check:\", numpy.allclose(dB_3, TM.B1)\n print \"db/dtheta_4 check:\", numpy.allclose(dB_4, TM.B2)\n\n\n #\n # Test the lcs model class\n #\n\n gLCS = DC_LCSModel()\n gLCS.eval_A = TM.eval_A\n gLCS.eval_b = TM.eval_b\n gLCS.diff_A = TM.diff_A\n gLCS.diff_b = TM.diff_b\n \n gLCS.quiet=True\n gLCS.A_params_mask = numpy.array([True, True, False, False])\n gLCS.b_params_mask = numpy.array([False, False, True, True])\n\n x = gLCS.eval(theta)\n #print x.shape\n\n for k in range(p):\n print \"Primal solution for x_{}, matches spsolve calculation: {}\".\\\n format(k, numpy.allclose(x[:,k], spla.spsolve(A,B[:,k])))\n\n\n D = gLCS.jacobian(theta)\n\n # -- If theta[1]=0, and theta[2:3] are fixed, then there is an analytical\n # calculation for x(theta[0]), and in this case we can check the first\n # column of D.\n\n theta = numpy.array((5.1, 0, 1.2, 2.1))\n A = TM.eval_A(theta)\n B = TM.eval_b(theta)\n D = gLCS.jacobian(theta)\n\n for k in range(p):\n D_col_1 = -(1./theta[0]**2) * B[:,k]\n print \"First column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,0], D_col_1))\n\n\n # -- We'll use a numerical approximation to check the second column of D\n\n h = 0.000001\n theta = numpy.array((5.1, 1.1, 1.2, 2.1))\n dtheta = numpy.array((0., h, 0., 0.))\n A = TM.eval_A(theta)\n B = TM.eval_b(theta)\n x = gLCS.eval(theta)\n D = gLCS.jacobian(theta)\n\n A_dt = TM.eval_A(theta + dtheta)\n B_dt = TM.eval_b(theta + dtheta)\n\n for k in range(p):\n x_dt = spla.spsolve(A_dt, B_dt[:,k])\n D_col_2_num_approx = (x_dt - x[:,k])/h\n max_abs_err = numpy.max(numpy.abs(D[k,:,1] - D_col_2_num_approx))\n\n print \"Second column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,1], 
D_col_2_num_approx))\n \n print \"Max abs error in second column of D_{}: {}\".\\\n format(k, max_abs_err)\n \n\n # -- If theta[0] and theta[1] are fixed, A(theta) is determined, and A^{-1}\n # is fixed. With a little math you can analytically calculate the third\n # and fourth columns of D. In fact x(theta) is linear in theta[2] and\n # theta[3], but not in theta[0] and theta[1].\n\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A = TM.eval_A(theta)\n D = gLCS.jacobian(theta);\n\n for k in range(p):\n D_col_3 = spla.spsolve(A, TM.B1[:,k])\n\n print \"Third column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,2], D_col_3))\n\n\n for k in range(p):\n D_col_4 = spla.spsolve(A, TM.B2[:,k])\n \n print \"Fourth column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,3], D_col_4))", "def learn(self):\r\n \r\n # unpack\r\n X = self.Train.X\r\n Y = self.Train.Y\r\n DY = self.Train.DY\r\n \r\n NX ,ND = X.shape\r\n NDY,_ = DY.shape\r\n \r\n print 'Build Information Matricies ...'\r\n \r\n # functions\r\n ay0 = np.array([[1.]]*NX)\r\n ay1 = X\r\n ay2 = np.reshape( np.einsum('ij,ik->ijk',X,X) , [-1,ND*ND] )\r\n\r\n # reduce redundant basis variables\r\n i_doub = np.tri(ND,k=-1).T == 1\r\n ay2[:,i_doub.ravel()] = ay2[:,i_doub.ravel()] * 2. \r\n i_keep = np.tri(ND,k=0).T == 1\r\n ay2 = ay2[:,i_keep.ravel()]\r\n\r\n # basis matrix, functions\r\n Ay = np.hstack([ay0,ay1,ay2])\r\n \r\n # arrays for the least squares regression\r\n At = Ay\r\n Yt = Y\r\n \r\n # gradients\r\n if NDY:\r\n ad0 = np.array([[0.]]*NX*ND)\r\n \r\n ad1 = np.tile( np.eye(ND) , [NX,1] )\r\n \r\n ad2a = np.repeat( np.eye(ND)[:,None,:] , ND , 1 )\r\n ad2a = np.reshape( ad2a , [-1,ND*ND] ) \r\n ad2a = np.repeat( ad2a, NX, axis=0 ) * np.repeat( np.tile( X, [ND,1] ) , ND, axis=1 )\r\n \r\n ad2b = np.repeat( np.eye(ND)[:,:,None] , ND , 2 )\r\n ad2b = np.reshape( ad2b , [-1,ND*ND] ) \r\n ad2b = np.repeat( ad2b, NX, axis=0 ) * np.tile( np.tile( X, [ND,1] ) , [1,ND] )\r\n \r\n ad2 = ad2a + ad2b\r\n \r\n # reduce redundant bases\r\n ad2[:,i_doub.ravel()] = ad2[:,i_doub.ravel()] * 2.\r\n ad2 = ad2[:,i_keep.ravel()] \r\n \r\n Ad = np.hstack([ad0,ad1,ad2])\r\n \r\n # add to arrays for least squares regression\r\n At = np.vstack([At,Ad])\r\n Yt = np.vstack([Yt, np.ravel(DY.T)[:,None]])\r\n \r\n print 'Least Squares Solve ...'\r\n B = sp.linalg.lstsq(At,Yt)[0] \r\n \r\n # unpack data\r\n c = B[0,0]\r\n b = B[1:ND+1]\r\n \r\n A = np.zeros([ND,ND])\r\n A[i_keep] = B[ND+1:,0]\r\n A[i_keep.T] = A.T[i_keep.T]\r\n \r\n # problem forumulation\r\n A = A*2.\r\n \r\n # store results\r\n self.c = c\r\n self.b = b\r\n self.A = A\r\n \r\n print ''" ]
[ "0.67663735", "0.65927625", "0.65322", "0.6456039", "0.6399128", "0.6376486", "0.6376346", "0.6321311", "0.62462187", "0.6168609", "0.6121046", "0.60952455", "0.60772586", "0.60441196", "0.6029791", "0.60081315", "0.5985883", "0.59815985", "0.5979063", "0.59740895", "0.59594625", "0.593622", "0.5934973", "0.5915493", "0.5898285", "0.58929014", "0.58805156", "0.5876658", "0.5873537", "0.58682483", "0.58447534", "0.58367866", "0.58275205", "0.58254874", "0.58198553", "0.58170235", "0.5814674", "0.5814519", "0.58011955", "0.57722443", "0.5769062", "0.5766677", "0.5762845", "0.5762525", "0.5762306", "0.5752847", "0.5748185", "0.57474893", "0.57427925", "0.5740625", "0.57370234", "0.5729024", "0.5722486", "0.5721308", "0.5711759", "0.5708569", "0.5704845", "0.5692845", "0.5691812", "0.5685834", "0.5684368", "0.5682234", "0.5678222", "0.56701785", "0.5665688", "0.5656437", "0.565628", "0.5650155", "0.56490266", "0.5645619", "0.56390196", "0.56383353", "0.56304324", "0.5629455", "0.56275827", "0.5624091", "0.56234163", "0.5621074", "0.5619473", "0.5614226", "0.56137866", "0.56124896", "0.56074256", "0.56057453", "0.56020933", "0.559844", "0.55972064", "0.5594859", "0.558912", "0.558912", "0.5581207", "0.5580153", "0.55787814", "0.55756056", "0.55735916", "0.5569147", "0.5568056", "0.55659574", "0.5563159", "0.5562329", "0.556232" ]
0.0
-1
Process and return selected confounds from the confounds file
def _select_confounds(confounds_file, selected_confounds):
    import pandas as pd
    import numpy as np

    confounds_df = pd.read_csv(confounds_file, sep='\t', na_values='n/a')
    # fill the first value of FramewiseDisplacement with the mean.
    if 'FramewiseDisplacement' in selected_confounds:
        confounds_df['FramewiseDisplacement'] = confounds_df['FramewiseDisplacement'].fillna(
            np.mean(confounds_df['FramewiseDisplacement']))
    desired_confounds = confounds_df[selected_confounds]
    return desired_confounds
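A minimal usage sketch of the document function above. The TSV path and the aCompCor column names are hypothetical placeholders; only 'FramewiseDisplacement' comes from the function itself, which assumes an fMRIPrep-style confounds table containing the requested columns.

    # hypothetical confounds file and extra column names, for illustration only
    selected = ['FramewiseDisplacement', 'aCompCor00', 'aCompCor01']
    confounds = _select_confounds('sub-01_task-rest_bold_confounds.tsv', selected)
    print(confounds.shape)  # (number of volumes, len(selected))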
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _select_confounds(confounds_file, selected_confounds):\n import pandas as pd\n import numpy as np\n import re\n\n confounds_df = pd.read_csv(confounds_file, sep='\\t', na_values='n/a')\n # regular expression to capture confounds specified at the command line\n confound_expr = re.compile(r\"|\".join(selected_confounds))\n expanded_confounds = list(filter(confound_expr.fullmatch, confounds_df.columns))\n imputables = ('framewise_displacement', 'std_dvars', 'dvars', '.*derivative1.*')\n\n # regular expression to capture all imputable confounds\n impute_expr = re.compile(r\"|\".join(imputables))\n expanded_imputables = list(filter(impute_expr.fullmatch, expanded_confounds))\n for imputable in expanded_imputables:\n vals = confounds_df[imputable].values\n if not np.isnan(vals[0]):\n continue\n # Impute the mean non-zero, non-NaN value\n confounds_df[imputable][0] = np.nanmean(vals[vals != 0])\n\n desired_confounds = confounds_df[expanded_confounds]\n # check to see if there are any remaining nans\n if desired_confounds.isna().values.any():\n msg = \"The selected confounds contain nans: {conf}\".format(conf=expanded_confounds)\n raise ValueError(msg)\n return desired_confounds", "def read_conect(self):\n self.conect_section = []\n if not self.lines:\n self.read_lines()\n for line in self.lines:\n if \"CONECT\" in line[0:6]:\n self.conect_section.append(line)", "def get_convos():\n file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)\n convos = []\n with open(file_path, 'rb') as f:\n for line in f.readlines():\n parts = line.split(b' +++$+++ ')\n if len(parts) == 4:\n convo = []\n for line in parts[3][1:-2].split(b', '):\n convo.append(line[1:-1])\n convos.append(convo)\n\n return convos", "def _read_conll(cls, input_file):\n #def read_conll(input_file):\n sents = []\n sent, labels = [], []\n for line in open(input_file):\n if line.startswith(\"# sent_id\"):\n current_id = line.strip().split(\" = \")[1]\n elif line.strip() == \"\":\n if len(sent) > 0:\n sents.append((current_id, sent, labels))\n sent, labels = [], []\n else:\n token, label = line.strip().split(\"\\t\")\n sent.append(token)\n labels.append(label)\n return sents", "def main():\n processSetOfCerFiles(sys.argv[1:])", "def getConc(fileID, spc):\r\n\r\n dataKey = rmn.fstinf(fileID, nomvar=spc, ip1=ip1)['key']\r\n dataRec = rmn.fstluk(dataKey)\r\n concData = dataRec['d']\r\n return concData, dataKey, dataRec", "def semcor2conc(args):\r\n input_files = list_files(*args.input_files)\r\n types = list(args.types)\r\n output_file = args.output_file or output_default / '{}_conc.csv'.format('_'.join(types))\r\n output_file = Path(output_file)\r\n left_context = args.left\r\n right_context = args.right\r\n separator = args.separator\r\n filter_pos = args.pos\r\n kind_id = args.kind_id\r\n with output_file.open('w') as file:\r\n x = 'last\\tnext\\tlemma' if args.add_closest else 'lemma'\r\n file.write('\\t'.join(['concordance', 'file', 'token_id', 'left', 'wordform', 'right', x, 'pos', 'sense_key\\n']))\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n tokenlist = list(generate_tokenlist(corpus_file.text))\r\n chosen_words = [index for (index, token) in enumerate(tokenlist) if token.lemma in types]\r\n for word in chosen_words:\r\n node = tokenlist[word]\r\n pos = node.pos\r\n if filter_pos and not re.match(r'{}'.format([x for x in filter_pos]), pos):\r\n continue\r\n if kind_id == 'lemma_pos':\r\n wordtype = '/'.join([node.lemma, node.pos])\r\n elif kind_id == 'wordform':\r\n wordtype = 
node.wordform\r\n else:\r\n wordtype = node.lemma\r\n token_id = '/'.join([wordtype, corpus_file.shortname, str(word + 1)])\r\n left, right = generate_context(tokenlist, word, left_context, right_context, separator, len(tokenlist))\r\n if args.add_closest:\r\n last = tokenlist[word-1].wordform\r\n following = tokenlist[word+1].wordform\r\n line = [corpus_file.concordance, corpus_file.shortname, token_id, left, node.wordform, right, last, following, node.lemma, pos, node.sense_key or 'NA']\r\n else:\r\n line = [corpus_file.concordance, corpus_file.shortname, token_id, left, node.wordform, right, node.lemma, pos, node.sense_key or 'NA']\r\n file.write('\\t'.join(line) + '\\n')\r\n print('File \"{}\" processed.'.format(input_file.stem))", "def read_confounds(filename, confounds):\n df_confounds = pandas.read_csv(filename, sep='\\t', usecols=confounds)\n return df_confounds", "def _load_confounds_main(\n confounds_raw, strategy=[\"minimal\"], n_components=0.95, motion_model=\"6params\"\n):\n\n # Convert tsv file to pandas dataframe\n if not isinstance(confounds_raw, pd.DataFrame):\n confounds_raw = pd.read_csv(confounds_raw, delimiter=\"\\t\", encoding=\"utf-8\")\n\n # Add chosen confounds based on strategy to dataframe\n confounds_of_interest = set()\n confounds_out = pd.DataFrame()\n\n for strat in strategy:\n if strat in confound_dict.keys():\n\n confounds_of_interest |= set(_confound_strat(strat, confounds_raw))\n else:\n confounds_of_interest.add(strat)\n\n # Remove motion confounds and concatenate columns to confounds_out\n non_motion_confounds = [\n conf\n for conf in confounds_of_interest\n if ((\"rot\" not in conf) and (\"trans\" not in conf))\n ]\n\n confounds_out = pd.concat(\n (confounds_out, confounds_raw[list(non_motion_confounds)]), axis=1\n )\n\n # Apply PCA on motion confounds\n motion_bool = set(motion_6params) & confounds_of_interest\n if motion_bool:\n confounds_out = _pca_motion(\n confounds_out, confounds_raw, n_components, motion_model,\n )\n\n return confounds_out", "def select_confounds(subject_id, run_num):\n confounds_dir = f'/data/sub-%02d/func/' % int(subject_id)\n confounds_file = confounds_dir+f'sub-%02d_task-tsl_run-%d_desc-confounds_timeseries.tsv' % (int(subject_id), int(run_num))\n conf_df = pd.read_csv(confounds_file, sep='\\t')\n return conf_df", "def fetch_corpous_from_file(filepath):\n f = open(filepath, 'r')\n corpus_text = f.read() \n corpus_sentence_list = corpus_text.lower().split('.')\n corpus_list_sent_processed = [remove_special_chars(item) for item in corpus_sentence_list if len(item)>1] \n return corpus_list_sent_processed", "def handle(self):\n\n # We generate the status file with the catched status.\n Generate(self.catched, \"SYNTAX\").status_file()\n\n # We return the parsed status.\n return self.catched", "def select_confounds(subject_id, run_num):\n confounds_dir = f'/data/sub-%02d/func/' % int(subject_id)\n confounds_file = confounds_dir+f'sub-%02d_task-tsl_run-%d_desc-confounds_timeseries.tsv' % (int(subject_id), int(run_num))\n conf_df = pd.read_csv(confounds_file, sep='\\t')\n return conf_df", "def cat(config, input):\n for file in input:\n while True:\n output = file.read()\n if not output:\n break\n m = SearchMatches(file, output, config.regex, config.color, config.underline)\n m.print_match_lines()", "def read_conll_file(file_name):\n data = []\n current_words = []\n current_tags = []\n\n for line in codecs.open(file_name, encoding='utf-8'):\n line = line.strip()\n \n if line:\n if line[0] == '#':\n continue # skip comments\n tok = 
line.split('\\t')\n if '-' in tok[0] or '.' in tok[0]:\n continue # skip special tokenized words\n word = tok[1]\n tag = tok[3]\n \n current_words.append(word)\n current_tags.append(tag)\n else:\n if current_words: # skip empty lines\n data.append((current_words, current_tags))\n current_words = []\n current_tags = []\n\n # check for last one\n if current_tags != [] and not raw:\n data.append((current_words, current_tags))\n return data", "def make_conc_obj_from_conclines(conc_results):\n from corpkit.interrogation import Concordance\n all_conc_lines = []\n for sc_name, resu in sorted(conc_results.items()):\n if only_unique:\n unique_results = uniquify(resu)\n else:\n unique_results = resu\n #make into series\n if PYTHON_VERSION == 2:\n pindex = 'c f s l m r'.encode('utf-8').split()\n else:\n pindex = 'c f s l m r'.split()\n for fname, spkr, start, word, end in unique_results:\n #spkr = str(spkr, errors = 'ignore')\n fname = os.path.basename(fname)\n ser = [sc_name, fname, spkr, start, word, end]\n all_conc_lines.append(Series(ser, index=pindex))\n\n if random:\n from random import shuffle\n shuffle(all_conc_lines)\n\n try:\n conc_df = pd.concat(all_conc_lines, axis=1).T\n if all(x == '' for x in list(conc_df['s'].values)):\n conc_df.drop('s', axis=1, inplace=True)\n \n if show_ngram or show_collocates:\n if not language_model:\n counted = Counter(conc_df['m'])\n indices = [l for l in list(conc_df.index) if counted[conc_df.ix[l]['m']] > 1] \n conc_df = conc_df.ix[indices]\n conc_df = conc_df.reset_index(drop=True)\n\n locs['corpus'] = corpus.name\n conc_df = Concordance(conc_df)\n try:\n conc_df.query = locs\n except AttributeError:\n pass\n return conc_df\n\n except ValueError:\n return", "def get_convos():\n # returns array of arrays with line data from movie_conversations.txt\n # ex. 
convos = [['L194', 'L195', 'L196'], ['L198', L'199']]\n file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)\n convos = []\n with open(file_path, 'rb') as f:\n for line in f.readlines():\n parts = line.split(' +++$+++ ')\n if len(parts) == 4:\n convo = []\n for line in parts[3][1:-2].split(', '):\n convo.append(line[1:-1])\n convos.append(convo)\n\n return convos", "def main():\n # call open_file() to get file pointer \n fd = open_file()\n # call fill completion to get dict, then close the openned file\n full_set = create_dict(fd)\n wrds = find_words(full_set)\n print(wrds)\n fd.close()\n # ask for a prefix in while loop", "def gather_candidates(self, context):\n candidates = []\n\n with open(context['data_file'], 'r') as fp:\n try:\n config = load(fp)\n except JSONDecodeError:\n err_string = 'Decode error for' + context['data_file']\n error(self.vim, err_string)\n config = []\n\n for obj in config:\n candidates.append({\n 'word': obj['option'],\n '__option': obj['option'],\n '__shortname': obj['shortname'],\n '__description': obj['description'],\n 'abbr': f\"{obj['option']:<15}│{obj['shortname']:<10}│{obj['description']:<15}\",\n })\n\n return candidates", "def load_confounds(\n confounds_raw, strategy=[\"minimal\"], n_components=0.95, motion_model=\"6params\"\n):\n if type(confounds_raw) == str:\n confounds_out = _load_confounds_helper(\n confounds_raw,\n strategy=strategy,\n n_components=n_components,\n motion_model=motion_model,\n )\n\n elif type(confounds_raw) == list:\n confounds_out = []\n for file in confounds_raw:\n confounds_out.append(\n _load_confounds_helper(\n file,\n strategy=strategy,\n n_components=n_components,\n motion_model=motion_model,\n )\n )\n\n else:\n confounds_out = 0\n raise ValueError(\"Invalid input type\")\n\n return confounds_out", "def _parse_relevant_lines(cls, conf_file_path):\n # Make a dictionary with the keys of find_words corresponding with\n # empty array as a place holder.\n relevant_lines = dict([(word, []) for word in cls.FIND_WORDS])\n # Now locate the relevant lines in this file and keep the found\n # pattern matches.\n with open(conf_file_path, 'r') as config:\n for line in config:\n # Strip whitespaces\n line = line.strip(\" \\t\")\n # Skip comment lines..\n if line.startswith('#'):\n continue\n for word, pattern in cls.FIND_WORDS.items():\n if \"{} \".format(word) not in line:\n continue\n matches = pattern.findall(line)\n if matches:\n # We only need the first capturing group.\n matches = [match[0].strip(\" \\t\") for match in matches]\n # We will only need the matched strings later on.\n relevant_lines[word] += matches\n return relevant_lines", "def readFileToCorpus(f):\n if os.path.isfile(f):\n file = open(f, \"r\") # open the input file in read-only mode\n i = 0 # this is just a counter to keep track of the sentence numbers\n corpus = [] # this will become a list of sentences\n print(\"Reading file \", f)\n for line in file:\n i += 1\n sentence = line.split() # split the line into a list of words\n #append this lis as an element to the list of sentences\n corpus.append(sentence)\n if i % 1000 == 0:\n #print a status message: str(i) turns int i into a string\n #so we can concatenate it\n sys.stderr.write(\"Reading sentence \" + str(i) + \"\\n\")\n #endif\n #endfor\n return corpus\n else:\n #ideally we would throw an exception here, but this will suffice\n print(\"Error: corpus file \", f, \" does not exist\")\n sys.exit() # exit the script\n #endif", "def extract_programs(outputf):\t\n programs = []\n with open(outputf,'r') as 
f:\n\t combo_lines = f.readlines()\n for combo_line in combo_lines:\n combo = combo_line.split(' ',1)[1]\n\t programs.append(combo)\n return programs", "def parsec(formatted_file, pattern_tree):\n pattern_path = []\n result_tree = {}\n result_path = []\n for line in formatted_file:\n search(line, pattern_tree, pattern_path, result_tree, result_path)\n return result_tree", "def _gather_confounds(\n signals=None,\n dvars=None,\n std_dvars=None,\n fdisp=None,\n rmsd=None,\n motion=None,\n newpath=None,\n):\n all_files = []\n confounds_list = []\n for confound, name in (\n (signals, \"Global signals\"),\n (std_dvars, \"Standardized DVARS\"),\n (dvars, \"DVARS\"),\n (fdisp, \"Framewise displacement\"),\n (rmsd, \"RMSD\"),\n (motion, \"Motion parameters\"),\n ):\n if confound is not None and isdefined(confound):\n confounds_list.append(name)\n if os.path.exists(confound) and os.stat(confound).st_size > 0:\n all_files.append(confound)\n\n confounds_data = pd.DataFrame()\n for file_name in all_files: # assumes they all have headings already\n new = pd.read_csv(file_name, sep=\"\\t\")\n for column_name in new.columns:\n new.rename(\n columns={column_name: _camel_to_snake(_less_breakable(column_name))}, inplace=True\n )\n\n confounds_data, new = _adjust_indices(confounds_data, new)\n confounds_data = pd.concat((confounds_data, new), axis=1)\n\n if newpath is None:\n newpath = os.getcwd()\n\n combined_out = os.path.join(newpath, \"confounds.tsv\")\n confounds_data.to_csv(combined_out, sep=\"\\t\", index=False, na_rep=\"n/a\")\n\n return combined_out, confounds_list", "def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences", "def getComicsListFromFile(filename):\n h = open(filename)\n contents = \"\\n\".join(h.readlines())\n expr = re.compile(\"([a-z0-9]+)\")\n return expr.findall(contents)", "def collect_confs(self):\n\n sim_confs = []\n failed_sims = []\n solfile = self.gconf['General']['solution_file']\n # Find the data files and instantiate Simulation objects\n base = os.path.expandvars(self.gconf['General']['base_dir'])\n self.log.info(base)\n for root, dirs, files in os.walk(base):\n conf_path = os.path.join(root, 'sim_conf.yml')\n if 'sim_conf.yml' in files and solfile in files:\n self.log.info('Gather sim at %s', root)\n # sim_obj = Simulation(Config(conf_path))\n conf = Config(conf_path)\n # sim_obj.conf.expand_vars()\n sim_confs.append(conf)\n elif 'sim_conf.yml' in files:\n # sim_obj = Simulation(Config(conf_path))\n conf = Config(conf_path)\n self.log.error('Sim %s is missing its data file',\n conf['General']['sim_dir'])\n failed_sims.append(conf)\n self.sim_confs = sim_confs\n self.failed_sims = failed_sims\n if not sim_confs:\n self.log.error('Unable to find any successful simulations')\n raise RuntimeError('Unable to find any successful simulations')\n return sim_confs, failed_sims", "def 
_FindTarget(self):\n ret = []\n for filename in self._Walk(self._main_directory, \".tex\"):\n skip, cache = self._CacheDataAndSkip(filename)\n if skip:\n ret.extend(cache)\n continue\n\n resp = []\n for i, line in enumerate(codecs.open(filename, 'r', 'utf-8')):\n line = line.rstrip()\n match = re.search(self.collect_regex, line)\n if match is not None:\n lid = re.sub(\".*\" + self.collect_regex + \".*\", r\"\\1\", line)\n if not lid in ret and not lid in resp:\n resp.append( lid )\n #TODO- make it an option if we want gotos for\n #this completion\n self._goto_labels[lid] = (filename, i+1, match.start(1))\n\n self._cached_data[filename] = resp\n ret.extend(resp)\n \"\"\"\n we moved the building of completes to here so we can\n share a cache between square and curly brackets\n \"\"\"\n temp = []\n for i in ret:\n tempo = self.BuildOurCompletes(i)\n temp.append( tempo )\n return temp", "def load_conll(path, exclude=False, file_encoding='utf-8'):\n corpus = []\n\n with open(path) as f:\n sent = []\n for line in f:\n es = line.rstrip().split()\n if len(es) > 1:\n word = es[0].decode(file_encoding).lower()\n# word = RE_NUM.sub(u'0', word)\n tag = es[1].decode(file_encoding)\n syn = es[2].decode(file_encoding)\n ne = es[3].decode(file_encoding) # you can ingore 1-3 for n2n SRL task, but we parse here just in case\n prd = es[4].decode(file_encoding)#Target\n prop = []\n\n if len(es) > 5:\n prop = es[5:]\n sent.append((word, tag, syn, ne, prd, prop))\n else:\n if exclude and (len(sent[0][5]) == 0 or len(sent) < 2):\n pass\n else:\n corpus.append(sent)\n sent = []\n\n if sent:\n corpus.append(sent)\n\n return corpus", "def format_combo(self, combo_file):\n\n models = []\n with open(combo_file, \"r\") as fp:\n for line in fp:\n match = self.output_regex.match(line.strip())\n if match is not None:\n model = match.group(2).strip()\n if model == \"true\" or model == \"false\":\n continue\n complexity = match.group(3).split(\",\")[2].split(\"=\")[1]\n models.append(MosesModel(model, complexity))\n\n return models", "def pipeline(file):\n # special processing is performed to avoid sentence boundaries after abbrevs\n doc = nlp(text_processing.preprocess_text_ents(file))\n grid = get_grid(doc)\n distrib = get_distrib(grid, doc)\n return get_feats(distrib)", "def _load_confounds_helper(\n confound_raw, strategy=[\"minimal\"], n_components=0.95, motion_model=\"6params\"\n):\n if \"nii\" not in confound_raw[-6:]:\n confounds_out = _load_confounds_main(\n confound_raw,\n strategy=strategy,\n n_components=n_components,\n motion_model=motion_model,\n )\n\n else:\n confound_raw = confound_raw.replace(\n \"_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz\",\n \"_desc-confounds_regressors.tsv\",\n )\n confounds_out = _load_confounds_main(\n confound_raw,\n strategy=strategy,\n n_components=n_components,\n motion_model=motion_model,\n )\n return confounds_out", "def get_convos():\n convos = []\n convos_file_path = os.path.join(DATA_PATH, MOVIE_CONVOS_FILE)\n\n with open(convos_file_path, 'r', errors='ignore') as f:\n # +++$+++ is used to split the section in a single line\n # A correct formed line includes four sections\n # The last section is list of lineIDs in each conversation\n\n for line in f:\n line_sections = line.split(' +++$+++ ')\n assert len(line_sections) == 4\n convos.append(line_sections[3][1:-2].replace('\\'', '').split(', '))\n\n return convos", "def xml_to_conll(self, xml_file_path):\n\n if not os.path.exists(CONLL_PATH):\n self.create_directories(CONLL_PATH)\n\n\n for file in 
os.listdir(xml_file_path):\n\n # Set path to file\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open Files\n chapter_input = open(file, 'r', encoding='utf8')\n\n # Create Same Filename in Output Folder\n chapter_output = open(CONLL_PATH+os.path.split(file)[-1]+'.conll', 'w', encoding='utf8')\n\n print('Converting: ' + chapter_input.name + ' to Conll09 file: ' + chapter_output.name)\n\n chapter_input = BeautifulSoup(chapter_input, 'xml')\n for sentence in chapter_input.find_all('s'):\n line_id = 0\n for terminal in sentence.find_all('t'):\n line_id, terminal_id, form, lemma, plemma = line_id+1, terminal.get('id'), terminal.get('word'), terminal.get('lemma'), terminal.get('lemma')\n pos, ppos = terminal.get('pos'), terminal.get('pos')\n feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1 = \"_\" * 9 # <3 Python!\n chapter_output.write(\"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\"\n \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\n\"\n % (str(line_id)+\"-\"+terminal_id, form, lemma, plemma, pos, ppos, feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1))\n chapter_output.write(\"\\n\")\n\n chapter_output.close()\n\n print(\"Done!\")", "def read_proc_card(self,proc_card,cond=''):\n\n# Define Process\n\n ff=open(proc_card,'r')\n #read step by step \n # 1) find the begin of the definition of process\n # 2) read all process\n # 3) find multiparticle\n \n # 1) find the begin of the definition of process\n\n tag_proc=re.compile(r'''#\\s*Begin\\s+PROCESS\\s*#''',re.I)\n while 1:\n line=ff.readline()\n if line=='':\n sys.exit('bad proc_card information: missing BEGIN PROCESS tag')\n\n if tag_proc.search(line):\n break #(pass to step 2)\n\n # 2) read all process\n\n done=re.compile(r'''done''')\n end_coup=re.compile(r'''end_coup''')\n if cond:\n process=re.compile(r'''>.*@\\s*'''+cond,re.DOTALL)\n else:\n process=re.compile(r'''>''')\n\n while 1:\n line=ff.readline()\n if line=='':\n \n sys.exit('bad proc_card information: missing request process tag (end of file)')\n\n if done.search(line):\n if cond=='0':\n ff.close()\n return self.read_proc_card(proc_card)\n sys.exit('bad proc_card information: missing request process tag')\n \n if process.search(line):\n process_line=line\n break #(pass to step 3)\n\n\n # 3) find multiparticle\n begin_multi=re.compile(r'''#\\s*Begin\\s+MULTIPARTICLES\\s*#''',re.I)\n end_multi =re.compile(r'''#\\s*End\\s+MULTIPARTICLES\\s*#''',re.I)\n info =re.compile(r'''^(?P<tag>[\\S]+)\\s+(?P<multi>[\\S]*)''')\n multi={}\n in_multi_area=0\n while 1:\n line=ff.readline()\n if line=='':\n sys.exit('bad proc_card information: missing multiparticle tag') \n if end_multi.search(line):\n break\n\n if begin_multi.search(line):\n in_multi_area=1\n continue\n \n if in_multi_area:\n if info.search(line):\n info2= info.search(line)\n multi[info2.group('tag').lower()]=info2.group('multi')\n\n\n \n\n return process_line,multi", "def extract_concentrations(goo_file):\n concentrations = np.loadtxt(goo_file)\n if len(concentrations) != 300:\n return None\n return concentrations", "def PrepareCompile(file):\n global oilcc_I,oilcc_o,oilcc_S,oilcc_target\n fp = open(file,'r')\n # some flags\n item = ''; #one item is minimum object such as TASK,ALARM ...\n barcenum = 0;\n flag = False; #has \" { \" encountered or not\n start = False #has match an obj 
start or not\n for line in fp.readlines():\n #firstly, filter out the comment on this line\n el = DropComment(line);\n if(start == False):\n #{\n item = ''; \n barcenum = 0;\n flag = False;\n if(IsIt('osekObj',el)):\n start = True;\n item += el;\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n if((flag == True) and (barcenum == 0)): #in one line\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n else: # special process for include\n inc = GetIt('include',el)\n if(inc != None): #include file\n flag_inc = False\n for I in oilcc_I:\n finc = I + '/' + inc[0]\n if(os.path.exists(finc)):\n print 'INFO:parse include file <%s> in the path <%s>'%(inc[0],I)\n PrepareCompile(finc);\n flag_inc = True;\n if(flag_inc == False):\n print 'ERROR:cann\\'t find out the file %s!'%(inc[0])\n sys.exit(-1)\n #}\n else:\n #{\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n item += el;\n if((flag == True) and (barcenum == 0)):\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n #}\n fp.close()", "def reduce():\n\n # find obs log\n logname_lst = [fname for fname in os.listdir(os.curdir)\n if fname[-7:]=='.obslog']\n if len(logname_lst)==0:\n print('No observation log found')\n exit()\n elif len(logname_lst)>1:\n print('Multiple observation log found:')\n for logname in sorted(logname_lst):\n print(' '+logname)\n else:\n pass\n\n # read obs log\n logtable = read_obslog(logname_lst[0])\n\n # load config files\n config = configparser.ConfigParser(\n inline_comment_prefixes = (';','#'),\n interpolation = configparser.ExtendedInterpolation(),\n )\n # find local config file\n for fname in os.listdir(os.curdir):\n if fname[-4:]=='.cfg':\n config.read(fname)\n print('Load Congfile File: {}'.format(fname))\n break\n\n # extract keywords from config file\n section = config['data']\n rawpath = section.get('rawpath')\n statime_key = section.get('statime_key')\n exptime_key = section.get('exptime_key')\n section = config['reduce']\n midpath = section.get('midpath')\n odspath = section.get('odspath')\n figpath = section.get('figpath')\n mode = section.get('mode')\n fig_format = section.get('fig_format')\n oned_suffix = section.get('oned_suffix')\n\n # create folders if not exist\n if not os.path.exists(figpath): os.mkdir(figpath)\n if not os.path.exists(odspath): os.mkdir(odspath)\n if not os.path.exists(midpath): os.mkdir(midpath)\n\n nccd = 3\n\n ########################## load file selection #############################\n sel_lst = {}\n filesel_filename = 'file_selection.txt'\n if os.path.exists(filesel_filename):\n sel_file = open(filesel_filename)\n for row in sel_file:\n row = row.strip()\n if len(row)==0 or row[0] in '#':\n continue\n g = row.split(':')\n key, value = g[0].strip(), g[1].strip()\n if len(value)>0:\n sel_lst[key] = value\n sel_file.close()\n\n ################################ parse bias ################################\n bias_file = config['reduce.bias'].get('bias_file')\n\n if mode=='debug' and os.path.exists(bias_file):\n has_bias = True\n # load bias data from existing file\n hdu_lst = fits.open(bias_file)\n # pack bias image\n bias = [hdu_lst[iccd+1].data for iccd in range(nccd)]\n hdu_lst.close()\n message = 'Load bias data from file: {}'.format(bias_file)\n 
logger.info(message)\n print(message)\n else:\n # read each individual CCD\n bias_data_lst = [[] for iccd in range(nccd)]\n\n # initialize printing infomation\n pinfo1 = FormattedInfo(all_columns, ['frameid', 'fileid', 'object',\n 'exptime', 'nsat_1', 'q95_1', 'nsat_2', 'q95_2',\n 'nsat_3', 'q95_3'])\n\n for logitem in logtable:\n if logitem['object'].strip().lower()=='bias':\n fname = logitem['fileid']+'.fits'\n filename = os.path.join(rawpath, fname)\n hdu_lst = fits.open(filename)\n data_lst, mask_lst = parse_3ccd_images(hdu_lst)\n hdu_lst.close()\n\n # print info\n if len(bias_data_lst[0]) == 0:\n print('* Combine Bias Images: {}'.format(bias_file))\n print(' '*2 + pinfo1.get_separator())\n print(' '*2 + pinfo1.get_title())\n print(' '*2 + pinfo1.get_separator())\n string = pinfo1.get_format().format(logitem)\n print(' '*2 + print_wrapper(string, logitem))\n\n for iccd in range(nccd):\n bias_data_lst[iccd].append(data_lst[iccd])\n\n n_bias = len(bias_data_lst[0]) # get number of bias images\n has_bias = n_bias > 0\n\n if has_bias:\n # there is bias frames\n print(' '*2 + pinfo1.get_separator())\n\n bias = []\n # the final HDU list\n bias_hdu_lst = fits.HDUList([fits.PrimaryHDU()])\n\n # scan for each ccd\n for iccd in range(nccd):\n ### 3 CCDs loop begins here ###\n bias_data_lst[iccd] = np.array(bias_data_lst[iccd])\n\n section = config['reduce.bias']\n sub_bias = combine_images(bias_data_lst[iccd],\n mode = 'mean',\n upper_clip = section.getfloat('cosmic_clip'),\n maxiter = section.getint('maxiter'),\n mask = (None, 'max')[n_bias>=3],\n )\n\n message = '\\033[{2}mCombined bias for CCD {0}: Mean = {1:6.2f}\\033[0m'.format(\n iccd+1, sub_bias.mean(), (34, 32, 31)[iccd])\n\n print(message)\n\n head = fits.Header()\n head['HIERARCH GAMSE BIAS NFILE'] = n_bias\n\n ############## bias smooth ##################\n section = config['reduce.bias']\n if section.getboolean('smooth'):\n # bias needs to be smoothed\n smooth_method = section.get('smooth_method')\n\n h, w = sub_bias.shape\n if smooth_method in ['gauss', 'gaussian']:\n # perform 2D gaussian smoothing\n smooth_sigma = section.getint('smooth_sigma')\n smooth_mode = section.get('smooth_mode')\n \n bias_smooth = gaussian_filter(sub_bias,\n sigma=smooth_sigma, mode=smooth_mode)\n\n # write information to FITS header\n head['HIERARCH GAMSE BIAS SMOOTH'] = True\n head['HIERARCH GAMSE BIAS SMOOTH METHOD'] = 'GAUSSIAN'\n head['HIERARCH GAMSE BIAS SMOOTH SIGMA'] = smooth_sigma\n head['HIERARCH GAMSE BIAS SMOOTH MODE'] = smooth_mode\n else:\n print('Unknown smooth method: ', smooth_method)\n pass\n\n sub_bias = bias_smooth\n else:\n # bias not smoothed\n head['HIERARCH GAMSE BIAS SMOOTH'] = False\n\n bias.append(sub_bias)\n bias_hdu_lst.append(fits.ImageHDU(data=sub_bias, header=head))\n ### 3 CCDs loop ends here ##\n\n # write bias into file\n bias_hdu_lst.writeto(bias_file, overwrite=True)\n\n else:\n # no bias found\n pass\n\n ########################## find flat groups #########################\n flat_file = config['reduce.flat'].get('flat_file')\n\n flatdata_lst = []\n # a list of 3 combined flat images. [Image1, Image2, Image3]\n # bias has been corrected already. 
but not rotated yet.\n flatmask_lst = []\n # a list of 3 flat masks\n\n if mode=='debug' and os.path.exists(flat_file):\n # read flat data from existing file\n hdu_lst = fits.open(flat_file)\n for iccd in range(nccd):\n flatdata_lst.append(hdu_lst[iccd*2+1].data)\n flatmask_lst.append(hdu_lst[iccd*2+2].data)\n flatdata = hdu_lst[nccd*2+1].data.T\n flatmask = hdu_lst[nccd*2+2].data.T\n hdu_lst.close()\n message = 'Loaded flat data from file: {}'.format(flat_file)\n print(message)\n\n # alias of flat data and mask\n flatdata1 = flatdata_lst[0].T\n flatmask1 = flatmask_lst[0].T\n flatdata2 = flatdata_lst[1].T\n flatmask2 = flatmask_lst[1].T\n flatdata3 = flatdata_lst[2].T\n flatmask3 = flatmask_lst[2].T\n\n else:\n print('*'*10 + 'Parsing Flat Fieldings' + '*'*10)\n # print the flat list\n pinfo_flat = FormattedInfo(all_columns, ['frameid', 'fileid', 'object',\n 'exptime', 'nsat_1', 'q95_1', 'nsat_2', 'q95_2', 'nsat_3', 'q95_3'])\n print(' '*2 + pinfo_flat.get_separator())\n print(' '*2 + pinfo_flat.get_title())\n print(' '*2 + pinfo_flat.get_separator())\n for logitem in logtable:\n if len(logitem['object'])>=8 and logitem['object'][0:8]=='flatlamp':\n string = pinfo_flat.get_format().format(logitem)\n print(' '*2 + print_wrapper(string, logitem))\n print(' '*2 + pinfo_flat.get_separator())\n\n\n flat_group_lst = {}\n for iccd in range(nccd):\n\n key = 'flat CCD%d'%(iccd+1)\n sel_string = sel_lst[key] if key in sel_lst else ''\n prompt = '\\033[{1}mSelect flats for CCD {0} [{2}]: \\033[0m'.format(\n iccd+1, (34, 32, 31)[iccd], sel_string)\n\n # read selected files from terminal\n while(True):\n input_string = input(prompt)\n if len(input_string.strip())==0:\n # nothing input\n if key in sel_lst:\n # nothing input but already in selection list\n flat_group_lst[iccd] = parse_num_seq(sel_lst[key])\n break\n else:\n # repeat prompt\n continue\n else:\n # something input\n frameid_lst = parse_num_seq(input_string)\n # pack\n flat_group_lst[iccd] = frameid_lst\n # put input string into selection list\n sel_lst[key] = input_string.strip()\n break\n\n # now combine flat images\n\n flat_hdu_lst = [fits.PrimaryHDU()]\n # flat_hdu_lst is the final HDU list to be saved as fits\n\n for iccd in range(nccd):\n frameid_lst = flat_group_lst[iccd]\n\n # now combine flats for this CCD\n flat_data_lst = []\n # flat_data_lst is a list of flat images to be combined.\n # flat_data_lst = [Image1, Image2, Image3, Image4, ... 
...]\n\n #scan the logtable\n # log loop inside the CCD loop because flats for different CCDs are\n # in different files\n for logitem in logtable:\n if logitem['frameid'] in frameid_lst:\n filename = os.path.join(rawpath, logitem['fileid']+'.fits')\n hdu_lst = fits.open(filename)\n data_lst, mask_lst = parse_3ccd_images(hdu_lst)\n hdu_lst.close()\n\n # correct bias and pack into flat_data_lst\n if has_bias:\n flat_data_lst.append(data_lst[iccd]-bias[iccd])\n else:\n flat_data_lst.append(data_lst[iccd])\n\n # initialize flat mask\n if len(flat_data_lst) == 1:\n flatmask = mask_lst[iccd]\n flatmask = flatmask | mask_lst[iccd]\n\n n_flat = len(flat_data_lst)\n\n if n_flat == 0:\n continue\n elif n_flat == 1:\n flatdata = flat_data_lst[0]\n else:\n flat_data_lst = np.array(flat_data_lst)\n flatdata = combine_images(flat_data_lst,\n mode = 'mean',\n upper_clip = 10,\n maxiter = 5,\n mask = (None, 'max')[n_flat>=3],\n )\n #print('\\033[{1}mCombined flat data for CCD {0}: \\033[0m'.format(\n # iccd+1, (34, 32, 31)[iccd]))\n flatdata_lst.append(flatdata)\n flatmask_lst.append(flatmask)\n\n # pack the combined flat data into flat_hdu_lst\n head = fits.Header()\n head['HIERARCH GAMSE FLAT CCD{} NFILE'.format(iccd+1)] = n_flat\n flat_hdu_lst.append(fits.ImageHDU(flatdata, head))\n flat_hdu_lst.append(fits.ImageHDU(flatmask))\n # CCD loop ends here\n\n # alias of flat data and mask\n flatdata1 = flatdata_lst[0].T\n flatmask1 = flatmask_lst[0].T\n flatdata2 = flatdata_lst[1].T\n flatmask2 = flatmask_lst[1].T\n flatdata3 = flatdata_lst[2].T\n flatmask3 = flatmask_lst[2].T\n\n # mosaic flat data\n flatdata, flatmask = mosaic_3_images(\n data_lst = (flatdata1, flatdata2, flatdata3),\n mask_lst = (flatmask1, flatmask2, flatmask3),\n )\n\n flat_hdu_lst.append(fits.ImageHDU(flatdata.T))\n flat_hdu_lst.append(fits.ImageHDU(flatmask.T))\n # write flat data to file\n flat_hdu_lst = fits.HDUList(flat_hdu_lst)\n flat_hdu_lst.writeto(flat_file, overwrite=True)\n print('Flat data writed to {}'.format(flat_file))\n\n ######################### find & trace orders ##########################\n\n # simple debackground for all 3 CCDs\n xnodes = np.arange(0, flatdata1.shape[1], 200)\n flatdbkg1 = simple_debackground(flatdata1, flatmask1, xnodes, smooth=20,\n deg=3, maxiter=10)\n\n xnodes = np.arange(0, flatdata2.shape[1], 200)\n flatdbkg2 = simple_debackground(flatdata2, flatmask2, xnodes, smooth=20,\n deg=3, maxiter=10)\n\n xnodes = np.arange(0, flatdata3.shape[1], 200)\n flatdbkg3 = simple_debackground(flatdata3, flatmask3, xnodes, smooth=20,\n deg=3, maxiter=10)\n\n allimage, allmask = mosaic_3_images(\n data_lst = (flatdbkg1, flatdbkg2, flatdbkg3),\n mask_lst = (flatmask1, flatmask2, flatmask3),\n )\n\n tracefig = TraceFigure()\n\n section = config['reduce.trace']\n aperset = find_apertures(allimage, allmask,\n scan_step = section.getint('scan_step'),\n minimum = section.getfloat('minimum'),\n separation = section.get('separation'),\n align_deg = section.getint('align_deg'),\n filling = section.getfloat('filling'),\n degree = section.getint('degree'),\n display = section.getboolean('display'),\n fig = tracefig,\n )\n # decorate trace fig and save to file\n tracefig.adjust_positions()\n tracefig.suptitle('Trace for all 3 CCDs', fontsize=15)\n figfile = os.path.join(figpath, 'trace.png')\n tracefig.savefig(figfile)\n\n trcfile = os.path.join(midpath, 'trace.trc')\n aperset.save_txt(trcfile)\n\n regfile = os.path.join(midpath, 'trace.reg')\n aperset.save_reg(regfile, transpose=True)\n\n # save mosaiced flat image\n 
trace_hdu_lst = fits.HDUList(\n [fits.PrimaryHDU(allimage.T),\n fits.ImageHDU(allmask.T),\n ])\n trace_hdu_lst.writeto(config['reduce.trace'].get('file'), overwrite=True)\n\n ######################### Extract flat spectrum ############################\n\n spectra1d = extract_aperset(flatdata, flatmask,\n apertureset = aperset,\n lower_limit = 6,\n upper_limit = 6,\n )\n\n flatmap = get_slit_flat(flatdata, flatmask,\n apertureset = aperset,\n spectra1d = spectra1d,\n lower_limit = 6,\n upper_limit = 6,\n deg = 7,\n q_threshold = 20**2,\n figfile = 'spec_%02d.png',\n )\n fits.writeto('flat_resp.fits', flatmap, overwrite=True)", "def readCC(Ped_File, vcfIndivs):\n\n case = {} # case hash table: Key = ID Value = Sex\n control = {} # control hash table: Key = ID Value = Sex\n caseControl = {} # cases and controls hash table: Key = ID Value = index in vcf\n\n indivSet = Set(vcfIndivs) # convert array to Set to decrease lookup time.\n\n with open(Ped_File) as file:\n for line in file:\n field = line.strip().split('\\t')\n\n indiv_ID = field[1]\n father_ID = field[2]\n mother_ID = field[3]\n ptype = field[5] # case/control status: 1=control, 2=case\n\n if indiv_ID not in indivSet:\n sys.stderr.write('Individual {} is not in vcf.\\n'.format(indiv_ID))\n continue\n\n if field[4] == '1':\n sex = 'male'\n elif field[4] == '2':\n sex = 'female'\n else:\n sex = 'NA'\n\n if(father_ID != '0' or mother_ID != '0'):\n continue\n\n elif(ptype == '2'):\n case[indiv_ID] = sex\n caseControl[indiv_ID] = vcfIndivs.index(indiv_ID)\n\n elif(ptype == '1'):\n control[indiv_ID] = sex\n caseControl[indiv_ID] = vcfIndivs.index(indiv_ID)\n\n print 'Number of cases in hash table = {}.'.format(len(case))\n print 'Number of controls in hash table = {}.'.format(len(control))\n return case, control, caseControl", "def findSubcorpora():\n counterTwo = 0\n\n with open('bc.processed2.csv', 'r') as readfile:\n for line in readfile:\n counterTwo += 1\n if re.match('^<subcorpus', line):\n print(str(counterTwo) + '\\t' + line + '\\n')", "def selectCalibFile(self, files, rnum) :\n list_cf = []\n for path in files : \n fname = os.path.basename(path)\n\n if fname is 'HISTORY' : continue\n if os.path.splitext(fname)[1] != '.data' : continue\n\n cf = CalibFile(path)\n if cf.valid :\n modification_time = os.path.getmtime(path)\n list_cf.append((modification_time, cf))\n \n list_cf_ord = [x for _,x in sorted(list_cf)]\n \n # search for the calibration file\n for cf in list_cf_ord[::-1] :\n if cf.get_begin() <= rnum and rnum <= cf.get_end() :\n return cf.get_path()\n\n # if no matching found\n return ''", "def classify(self, filename):\n tokens = preprocess.parse_book(filename)\n perplexities = [m.perplexity(tokens) for m in self.models]\n print('-> perplexities = {}'.format([round(p) for p in perplexities]))\n over_baseline = [p - b for p, b in zip(perplexities, self.baselines)]\n print('-> over_baseline = {}'.format([round(o) for o in over_baseline]))\n min_index = over_baseline.index(min(over_baseline))\n return self.genres[min_index]", "def svc_classify(self, filename):\n tokens = preprocess.parse_book(filename)\n perplexities = [m.perplexity(tokens) for m in self.models]\n return self.genres[self.svc.predict(perplexities)]", "def simParser(filePath):\n\t\tresults = []\n\t\twith open(filePath + \".txt\", \"r\") as execFile:\n\t\t\tcontent = execFile.read()\n\n\t\t\tcycleStr = search(r'([cC]ycles.*?:\\s*)(\\d+)', content)\n\t\t\tassemblyInst = search(r'([iI]nstructions.*?:\\s*)(\\d+(.\\d+)?)', content)\n\n\t\t\tif cycleStr: 
results.append(cycleStr.group(2))\n\t\t\tif assemblyInst: results.append(assemblyInst.group(2))\n\n\t\treturn results", "def __load_correspondences(self):\n self.path_to_correspondences = QFileDialog.getOpenFileName(self,\n 'Open file',\n os.getcwd(),\n \"Text files (*.txt)\"\n )[0]\n try:\n with open(self.path_to_correspondences, 'r') as corr_file:\n for line in corr_file.readlines():\n point_a, point_b = map(int, line.strip().split(\" \"))\n except FileNotFoundError:\n QtWidgets.QMessageBox.warning(self,\n \"Error\",\n f\"Correspondences file was not found at {self.path_to_correspondences}\"\n )\n except ValueError:\n QtWidgets.QMessageBox.warning(self,\n \"Error\",\n f\"Correspondences file at {self.path_to_correspondences} has wrong format\"\n )", "def _collect(self, conll_directory) -> Iterator[Any]:\n logging.info(\"Reading .conll from %s\", conll_directory)\n return dataset_path_iterator(conll_directory, self.configs.file_ext)", "def selectCases(self):\n\n casenames = []\n self.fileIDs = \"\"\n self.caseIDs = \"\" # default for all cases and allows the file selection search method to occur\n cur = self.settings['conn'].cursor()\n cur.execute(\"select id, name, status from cases\")\n result = cur.fetchall()\n for row in result:\n casenames.append({'id': row[0], 'name': row[1], 'status': row[2]})\n\n Dialog_selectcase = QtGui.QDialog()\n ui = Ui_Dialog_selectfile(casenames)\n ui.setupUi(Dialog_selectcase, \"Select case(s) to view\", \"many\")\n ok = Dialog_selectcase.exec_()\n if ok:\n tmp_IDs = \"\"\n selectedCases = ui.getSelected() # list of dictionaries\n for row in selectedCases:\n tmp_IDs += \",\" + str(row['id'])\n if len(tmp_IDs) > 0:\n self.caseIDs = tmp_IDs[1:]", "def process_csvs(conn: Connection, basedir: Path) -> None:\n process_files(conn, basedir/\"files.csv\")\n process_notes(conn, basedir/\"notes.csv\")\n process_links(conn, basedir/\"links.csv\")\n process_clusters(conn, basedir/\"clusters.csv\")\n process_bibliography(conn, basedir/\"bibliography.csv\")\n process_citations(conn, basedir/\"citations.csv\")", "def extract_cuewords(cuewords, xml_file_path):\n\n try:\n file_output = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE, 'w', encoding='utf8')\n file_output_pos_tagged = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE_POS_TAGGED,\n 'w', encoding='utf8')\n\n except FileNotFoundError:\n print('Please set correct filenames')\n\n # Empty lists for collecting data per file\n cueword_ids = []\n cuewords = []\n\n # Empty list to collect data for all files\n all_cuewords = []\n all_cuewords_pos_tagged = []\n\n print('Extracting cuewords from:', xml_file_path, 'to:', CUEWORDS_DATA_PATH+CUEWORDS_FILE)\n\n # Go through all files in xml_file_path directory\n for file in os.listdir(xml_file_path):\n\n # For each file, open, parseXML\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n file_input = open(file, 'r', encoding='utf8')\n file_input = BeautifulSoup(file_input, 'xml')\n\n # Collect frames, get ids\n for frame in file_input.find_all('frame', {'name' : NEGATION_FRAME_NAME}):\n for target in frame.find_all('target'):\n for fenode in target.find_all('fenode'):\n cueword_ids.insert(0, fenode.get('idref'))\n\n # Find all splitwords\n for splitword in file_input.find_all('splitword'):\n cueword_ids.insert(0, splitword.get('idref'))\n\n # Find all terminals, check if its ID is in cueword_ids[]\n for terminal in file_input.find_all('t'):\n if terminal.get('id') in cueword_ids:\n all_cuewords.insert(0, 
terminal.get('word').lower())\n all_cuewords_pos_tagged.insert(0, terminal.get('word').lower()+\n '\\t'+terminal.get('pos'))\n\n # clear list for next document\n cueword_ids = []\n cuewords = []\n\n # Sort final list\n all_cuewords = sorted(set(all_cuewords))\n all_cuewords_pos_tagged = sorted(set(all_cuewords_pos_tagged))\n\n # Write cuewords without duplicates to file:\n for cueword in all_cuewords:\n file_output.write(cueword+'\\n')\n\n for cueword in all_cuewords_pos_tagged:\n file_output_pos_tagged.write(cueword+'\\n')\n\n file_output.close()\n file_output_pos_tagged.close()\n\n print('Cuewords extracted to:', file_output.name)\n print('Cuewords extracted and POS tagged to:', file_output_pos_tagged.name)\n print('Done!')", "def main(input_file, visualize):\n logging.info('Reading lines...')\n\n with open(input_file) as f:\n content = f.read()\n\n clauses, thesis = content.split('---\\n')\n\n logging.info('Parsing clauses...')\n parser = ClauseParser()\n parsed_clauses = parser.parse_cnf_list(clauses.splitlines())\n parsed_thesis = parser.parse_cnf_list(thesis.splitlines())\n\n result, tree = resolution(parsed_clauses, parsed_thesis)\n\n if visualize:\n display_resolution_tree(tree)\n\n logging.info(f'The thesis is {result}')", "def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n sentences.append([])\n labels.append([])\n \n return sentences,labels", "def process_files(self):\n matcher = self.choose_algorithm()\n # process one file at the time for better memory management\n for i, element in enumerate(self.input):\n filepath, _ = element\n\n try:\n with open(filepath, \"r\", encoding=\"utf-8\") as readfile:\n for line in readfile:\n matcher.find_match(line, self.case_insensitive)\n\n # collect unreadeable files for error log\n except Exception:\n self.errors.append(str(filepath))\n\n # copy results and reset matcher for next file\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n matcher.reset()\n\n # output - print or json\n if self.results:\n self.output(element)\n\n # if json print progress bar\n if self.json:\n self.progress_bar(i+1, len(self.input), prefix=\"Matching:\",\n fixed_len=True, length=40)", "def process_confounds(confounds_file, a_comp_cor=True):\n confounds_df = pd.read_csv(confounds_file, sep = '\\t', \n na_values=['n/a']).fillna(0)\n excessive_movement = (confounds_df.framewise_displacement>.5) & \\\n (confounds_df.std_dvars>1.2)\n excessive_movement_TRs = excessive_movement[excessive_movement].index\n excessive_movement_regressors = np.zeros([confounds_df.shape[0], \n np.sum(excessive_movement)])\n for i,TR in enumerate(excessive_movement_TRs):\n excessive_movement_regressors[TR,i] = 1\n excessive_movement_regressor_names = ['rejectTR_%d' % TR for TR in \n excessive_movement_TRs]\n # get movement regressors\n 
movement_regressor_names = ['trans_x','trans_y','trans_z','rot_x','rot_y','rot_z']\n movement_regressors = confounds_df.loc[:,movement_regressor_names]\n movement_regressor_names += [i+'td' for i in movement_regressor_names]\n movement_regressors = np.hstack((movement_regressors, np.gradient(movement_regressors,axis=0)))\n # add square\n movement_regressor_names += [i+'_sq' for i in movement_regressor_names]\n movement_regressors = np.hstack((movement_regressors, movement_regressors**2))\n \n # add additional relevant regressors\n add_regressor_names = ['framewise_displacement'] \n if a_comp_cor: \n add_regressor_names += [i for i in confounds_df.columns if 'a_comp_cor' in i]\n additional_regressors = confounds_df.loc[:,add_regressor_names].values\n regressors = np.hstack((movement_regressors,\n additional_regressors,\n excessive_movement_regressors))\n # concatenate regressor names\n regressor_names = movement_regressor_names + add_regressor_names + \\\n excessive_movement_regressor_names\n return regressors, regressor_names", "def extract_cuewords(self, cuewords, xml_file_path):\n\n # Create output files\n if not os.path.exists(CUEWORDS_DATA_PATH):\n self.create_directories(CUEWORDS_DATA_PATH)\n try:\n file_output = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE, 'w', encoding='utf8')\n file_output_pos_tagged = open(CUEWORDS_DATA_PATH+CUEWORDS_FILE_POS_TAGGED,\n 'w', encoding='utf8')\n\n except FileNotFoundError:\n print('Please set correct filenames')\n\n # Empty lists for collecting data per file\n cueword_ids = []\n cuewords = []\n\n # Empty list to collect data for all files\n all_cuewords = []\n all_cuewords_pos_tagged = []\n\n print('Extracting cuewords from:', xml_file_path, 'to:', CUEWORDS_DATA_PATH+CUEWORDS_FILE)\n\n if not os.path.exists(xml_file_path):\n self.create_directories(xml_file_path)\n\n # Go through all files in xml_file_path directory\n for file in os.listdir(xml_file_path):\n\n # For each file, open, parseXML\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n file_input = open(file, 'r', encoding='utf8')\n file_input = BeautifulSoup(file_input, 'xml')\n\n # Collect frames, get ids\n for frame in file_input.find_all('frame', {'name' : NEGATION_FRAME_NAME}):\n for target in frame.find_all('target'):\n for fenode in target.find_all('fenode'):\n cueword_ids.insert(0, fenode.get('idref'))\n\n # Find all splitwords\n for splitword in file_input.find_all('splitword'):\n cueword_ids.insert(0, splitword.get('idref'))\n\n # Find all terminals, check if its ID is in cueword_ids[]\n for terminal in file_input.find_all('t'):\n if terminal.get('id') in cueword_ids:\n all_cuewords.insert(0, terminal.get('word').lower())\n all_cuewords_pos_tagged.insert(0, terminal.get('word').lower()+\n '\\t'+terminal.get('pos'))\n\n # clear list for next document\n cueword_ids = []\n cuewords = []\n\n # Sort final list\n all_cuewords = sorted(set(all_cuewords))\n all_cuewords_pos_tagged = sorted(set(all_cuewords_pos_tagged))\n\n # Write cuewords without duplicates to file:\n for cueword in all_cuewords:\n file_output.write(cueword+'\\n')\n\n for cueword in all_cuewords_pos_tagged:\n file_output_pos_tagged.write(cueword+'\\n')\n\n file_output.close()\n file_output_pos_tagged.close()\n\n print('Cuewords extracted to:', file_output.name)\n print('Cuewords extracted and POS tagged to:', file_output_pos_tagged.name)\n print('Done!')\n\n return cuewords", "def parse_file(self):\n for num, line in enumerate(self._text):\n if 
\"CRYSTAL STRUCTURE SOLUTION\" in line:\n line = line.strip().strip('+').strip()\n if 'SHELXTL' in line:\n self.version = 'SHELXT ' + line.split()[-1]\n if line.strip().startswith('R1 Rweak Alpha'):\n for n in range(100):\n if not self._text[num + 1 + n]:\n break\n if self._text[num + 1]:\n self.solutions[self._text[num + 1 + n][58:76].strip()] = self._text[num + 1 + n][37:51].strip()", "def read_corpus(file_path, source):\n data = []\n for line in open(file_path):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>'] #TODO: Change\n data.append(sent)\n return data", "def process_all():\n\tfiles = os.listdir('records')\n\tfiles = [file for file in files if file not in ('.DS_Store','old')]\n\tattr_list = []\n\tcorpus = []\n\tsentences = []\n\tcorp_set = set()\n\tfor file in files:\n\t\twith open('records/'+file) as f:\n\t\t\tattr_list, corpus, sentences = proc_file(f,file,corpus,attr_list,corp_set,sentences)\n\treturn attr_list,corpus,sentences", "def read_run_cfds(run_cfds, run_len):\n if len(run_cfds) == 0:\n run_cfd = np.zeros((run_len, 0))\n spike_mask = np.zeros(0, dtype=int)\n\n else:\n try:\n run_cfds = [np.genfromtxt(cfd, dtype='f8') for cfd in run_cfds]\n except:\n raise NPDLError('Confound text file(s) have bad format')\n\n len_checks = [cfd.shape[0] == run_len for cfd in run_cfds]\n if False in len_checks:\n raise NPDLError('Confound regressors should have same length ' +\n 'as matching functional run.')\n\n # Need to reshape in case you get a 1-D confound\n run_cfds = [cfd.reshape((run_len, -1)) for cfd in run_cfds]\n run_cfd = np.hstack(run_cfds)\n\n # Determine type of confound\n # Normalize continuous covariates (in particular, center)\n spike_mask = np.ones(run_cfd.shape[1], dtype=int)\n for i in range(run_cfd.shape[1]):\n if np.sum(run_cfd[:, i] == np.round(run_cfd[:, i])) < run_len:\n run_cfd[:, i] = normalize(run_cfd[:, i])\n spike_mask[i] = 0\n elif np.sum(run_cfd[:, i]) != 1.:\n raise NPDLError(('Only continuous covariates and spike regressors' +\n 'accepted as confounds.'))\n return run_cfd, spike_mask", "def _detectCnv(self, pfb_file, sample_file, sel_chr=range(1, 23),\n sample_cols=[0,1,2,3,4], pfb_cols=[0, -1]):\n STATE = -1 #index in a region_row\n sel_chr = map(str, sel_chr)\n sample_rows = parse_sample(sample_file, sample_cols)\n pfb_rows = pfb_file and parse_pfb(pfb_file, pfb_cols) or []\n for chr, snp, loc, lrr, baf, pfb, d in group_snp_o_d(\n sample_rows, pfb_rows, sel_chr):\n q, p = self.detect(viterbi, lrr, baf, pfb, d)\n for row in iter_regions(snp, loc, q):\n row = list(row)\n row[STATE] = self.States[row[STATE]]\n yield tuple([chr,]+row)", "def getCompoundsAndWords(Config, docs):\n\n joblist = docs\n nProc = min(12, Config.nProc)\n pool = mp.Pool(processes=nProc)\n chunkSize = max(1, len(joblist)//(4*nProc))\n chunkedList = [[i, joblist[i:i + chunkSize], Config] for i in range(0, len(joblist), chunkSize)]\n print(\"Getting compounds #jobs\", len(chunkedList), \" chunkSize\", chunkSize, \" #proc\", nProc)\n compsWords = pool.map(parseSentences, chunkedList)\n print(\"Got all compounds\")\n # merge results\n compounds = [r[0] for r in compsWords]\n words = [r[1] for r in compsWords]\n newdocs = [r[2] for r in compsWords]\n #wcou = [r[3] for r in compsWords]\n ndocs = sum(newdocs, [])\n allocc = dict(Counter(sum(compounds, [])))\n #allwcou= sum((Counter(dict(x)) for x in wcou), Counter())\n\n #merge mappings from different processes\n mwords = words[0]\n for ws in 
words[1:]:\n for k, v in ws.items():\n addWord(mwords, k, v)\n\n return Counter(allocc), mwords, ndocs", "def process_corpus(args):\n\n fs = open(args.input,'r')\n out = list()\n for line in fs:\n blob = TextBlob(line.strip())\n result_info = dict()\n result_info\n result_info['correct'] = str(blob.correct())\n if args.parse :\n result_info['parse'] = get_parsed_text(blob)\n if args.tokenize:\n result_info['tokenize'] = get_tokenizer_result(blob)\n if args.sentiment:\n result_info['sentiment'] = analyze_sentiment(blob)\n if args.sentence_sentiment:\n result_info['sentence_sentiment'] = analyze_sentence_sentiment(blob)\n if args.noun_phrase:\n result_info['noun_phrase'] = get_noun_phrases(blob)\n if args.pos:\n result_info['pos'] = get_pos_tags(blob)\n\n out.append(result_info)\n print out\n json.dump(out,open('out.json','w'))\n fs.close()\n print '******************************* Execution completed *********************************'", "def _FindLabels(self):\n texs = \" \".join(glob.glob(\"*.tex\"))\n cat_process = subprocess.Popen(shlex.split(\"cat %s\" % texs),\n stdout=subprocess.PIPE)\n grep_process = subprocess.Popen(shlex.split(r\"grep \\\\\\\\label\"),\n stdin=cat_process.stdout,\n stdout=subprocess.PIPE)\n cat_process.stdout.close()\n\n lines = grep_process.communicate()[0]\n\n ret = []\n for label in lines.split(\"\\n\"):\n ret.append(responses.BuildCompletionData(\n re.sub(r\".*\\label{(.*)}.*\", r\"\\1\", label)\n )\n )\n\n return ret", "def crapome_parser():\n import os.path\n \n # contTreshold = 30 # set this to the desired contamination score\n resD = {}\n \n # crapFile = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"1503486016360_gp-1.txt\"),\"rU\")\n crapFile = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"Crapome-all-proteins-ptpn22-ip-ctl.txt\"),\"rU\")\n \n headerFlag = True\n \n fileLength = 0\n for inpLine in crapFile: # parse crapome output file\n if headerFlag:\n headerFlag = False\n continue\n fileLength += 1\n lineList = inpLine.split(\"\\t\")\n if lineList[2] == \"\": continue\n elif len(lineList) > 2: contScore = int(lineList[2].split(\"/\")[0])\n else: contScore = 0\n \n # if contScore < contTreshold:\n resD[lineList[0]] = contScore\n \n # print \"Contaminant treshold: \" + str(contTreshold)\n \n print(\"lines parsed: \" + str(fileLength))\n print(\"Number of results: \" + str(len(resD)))\n \n # inpFile = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1_no0.csv\"),\"r\")\n # outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1_no0_crapome.csv\"),\"w\")\n inpFile = open(os.path.join(\"/home/mate/workspace/katamari/src/ed/bob/processed\", \"OST-24-05-2017_combined_ttest_ed_2.csv\"),\"rU\")\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"processed\", \"OST-24-05-2017_combined_ttest_ed_2_cr.csv\"),\"w\")\n\n \n \n headerFlag = True\n for inpLine in inpFile: # parse the input file for crapome and add crapome results to it\n inpList = inpLine.rstrip(\"\\n\").split(\",\")\n for inpI in inpList:\n outF.write(inpI + \",\")\n \n if headerFlag: \n outF.write(\"Crapome score\")\n headerFlag = False\n elif inpList[2].upper() in resD: outF.write(str(resD[inpList[2].upper()]))\n else: outF.write(\"0\")\n \n outF.write(\"\\n\")\n print(\"results written to file\")", "def process(self):\r\n\r\n index = cindex.Index.create()\r\n 
self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing `%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)", "def parse_corenlp_coref_xml_doc(input_dir = 'CoreNLP_coref_anno/dev'):\n\n\tmentions = []\n\tfor file in os.listdir(input_dir):\n\t\ttree = ET.parse(input_dir + '/' + file)\n\t\tdocument = tree.getroot()[0]\n\t\t# sentences_node = document.find('sentences')\n\n\t\t# for sentence in enumerate(sentences_node):\n\t\t# \ts_num = sentence.attribs['id']\n\t\t# \tsentence_text = \" \".join([token.word for token in sentence.find('tokens')])\n\t\t# \tsentences[s_num] = sentence_text\n\n\t\tcoref_node = document.find('coreference')\n\t\t\n\t\tfor coref_id, coref_chain in enumerate(coref_node):\n\t\t\tfor mention in cluster:\n\t\t\t\tsent_num = int(mention[0].text)\n\t\t\t\tstart = int(mention[1].text)-1\n\t\t\t\tend = int(mention[2].text)-1\n\t\t\t\ttext = mention[4].text\n\t\t\t\tmentions.append({\"filename\":file, \"s_num\":sent_num,\"EP\":\"E\", \"indices\":range(start, end),\"coref\":coref_id+1})\n\n\tmentions.sort(key=lambda x:(x[\"filename\"],x[\"s_num\"],x[\"indices\"][0]))\n\twith open('coref_output.txt', 'w') as out_file:\n\t\tout_file.write(\"file\\tsentence\\tentity(E) or predicate(P)\\t coref 
chain\\tindices\\t\\n\")\n\t\tout_file.write(\"\\n\".join([e[\"filename\"]+\"\\t\"+str(e[\"s_num\"])+\"\\t\"+e[\"EP\"]+\"\\t\"+str(e[\"coref\"])+\"\\t\"+str(e[\"indices\"])[1:-1] for e in mentions]))", "def parse_solution_file(file):\n result = \"\"\n _file = open(file, 'r')\n lines = _file.readlines()\n for line in range(1, len(lines)):\n result += lines[line]\n return result", "def findFiles(self):\n\n with open('analysis_result/firmwalkerOutput.txt', 'r') as firmwalker:\n for line in firmwalker:\n if line.startswith('##################################### ssh'):\n self.ssh = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### dropbear'):\n self.dropbear = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### busybox'):\n self.busyBox = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### telnet'):\n self.telnet = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### openssl'):\n self.openssl = next(firmwalker).strip('d/').strip('\\n')", "def _get_semantic_cuts(self, cuts_file, compute_recursive_cuts):\n sc1 = []\n sc2 = []\n if cuts_file is not None:\n sc1 = self._load_semantic_cuts_from_file(cuts_file)\n if compute_recursive_cuts:\n sc2 = self._compute_semantic_cuts()\n return sc1 + sc2", "def test_parse_consel(self):\n output_exp = ['0.99', '0.01']\n with open(self.consel_output_hgt_fp, 'r') as f:\n output = parse_consel(input_f=f)\n self.assertListEqual(output, output_exp)", "def main():\n cpp = read_file('studenci_cpp.txt')\n python = read_file('studenci_python.txt')\n return [student for student in cpp if student in python]", "def run(self):\n matched = ['Program Description']\n matched += sorted(self.matching_recipes())\n\n msg = 'Your Search For:'\n msg += PREFIX + PREFIX.join([\"'\" + word + \"'\" for word in self.words])\n msg += '\\nMatched These Recipes:'\n msg += PREFIX + PREFIX.join(matched)\n print(msg)\n return matched", "def _process_candidate_conf_files(self, reordered_files):\n confs = []\n for r, f in reordered_files:\n if not os.path.exists(f):\n continue\n\n conf = ConfFile(f, self.syspaths)\n conf.replace(self.remap_renamer)\n temp_name = \"%s...%s\" % (r['from'], r['to'])\n conf.path = conf.path.replace(r['from'], temp_name)\n conf.path = conf.path.replace(temp_name, r['to'])\n confs.append(conf)\n\n return confs", "def get_nsite_DMRfind(inputf,output,samples,path_to_allc=\"\",mc_type=[\"C\"],num_procs=1,use_mc_status=True,min_cov=0):\n #dictionary of sample_name -> file handle\n allc_files = {}\n allc_lines = {}\n allc_fields = {}\n allc_prevbyte = {} #sample_name -> prevbyte (started from) in the file\n with open(inputf,'r') as f, open(output,'w') as g:\n line = f.readline()\n line = line.rstrip(\"\\n\")\n fields = line.split(\"\\t\")\n prefix_len = len(fields) #number of fields in original file\n mc_type = expand_nucleotide_code(mc_type)\n g.write(\"\\t\".join(fields[:prefix_len])+\"\\t\"+\"\\t\".join([\"nsite_\"+sample for sample in samples])+\"\\n\")\n prev_chrom = \"\"\n prev_end = \"\"\n dmr_lines=[]\n methylation_levels = {}\n for line in f:\n line = line.rstrip(\"\\n\")\n dmr_lines.append(line)\n if num_procs == 1:\n for sample in samples:\n methylation_levels[sample]=get_nsite_DMRfind_worker(dmr_lines,mc_type,sample,path_to_allc,output,min_cov,use_mc_status=False)\n else:\n pool = Pool(num_procs)\n results = {}\n for sample in samples:\n 
results[sample]=pool.apply_async(get_nsite_DMRfind_worker,(dmr_lines,mc_type,sample,path_to_allc,output,min_cov),{\"use_mc_status\":False})\n pool.close()\n pool.join()\n for sample in results:\n methylation_levels[sample]=results[sample].get()\n temp_files = {}\n for sample in samples:\n temp_files[sample]=open(output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\",'r')\n\n for index,line in enumerate(dmr_lines):\n g.write(line)\n for sample in samples:\n #g.write(\"\\t\"+methylation_levels[sample][index])\n g.write(\"\\t\"+temp_files[sample].readline().rstrip(\"\\n\"))\n g.write(\"\\n\")\n for sample in samples:\n temp_files[sample].close()\n subprocess.check_call(shlex.split(\"rm \"+output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\"))", "def __get_configurations(self):\n for sect in self.file_parser.sections():\n cascade_list = []\n if self.file_parser.has_option(sect, 'cascade'):\n enabled = self.file_parser.get(sect, 'enabled')\n # pylint: disable = E1103\n optimp_list = self.file_parser.get(sect, 'cascade').split(',')\n # pylint: enable = E1103\n for cascade_str in optimp_list:\n cascade_list.append(tuple(cascade_str.split(':')))\n # pylint: disable = E1103\n lower_enabled = enabled.lower()\n # pylint: enable = E1103\n if lower_enabled in ['true', 'always']:\n stateval = True\n else:\n stateval = False\n\n self.config_dict[sect]['enabled'] = stateval\n self.config_dict[sect]['cascade_list'] = cascade_list", "def get_classification_results(self):\n try:\n path = self.path\n print(path + \"classification_results.txt\")\n results = joblib.load(path + \"classification_results.txt\")\n print(results)\n return results\n\n # Call Classify_Data() if results are not found\n except EOFError as eoferror:\n print(\"Classification results not found. Generating results...\")\n return self.Classify_Data()\n except IOError as ioerror:\n print(\"Classification results not found. 
Generating results...\")\n return self.Classify_Data()", "def __grep(findwhat, filename, ignorecase, regexp):\n\t\tresult = []\n\t\ttry:\n\t\t\tencoding = \"utf8\"\n\t\t\tcontent = open(filename,\"r\", encoding=encoding).read()\n\t\texcept FileNotFoundError:\n\t\t\treturn result\n\t\texcept UnicodeDecodeError:\n\t\t\tencoding = \"latin-1\"\n\t\t\tcontent = open(filename,\"r\", encoding=encoding).read()\n\t\t\t\n\t\tif __search(findwhat, content, ignorecase, regexp):\n\t\t\tlines = open(filename,\"r\", encoding=encoding).readlines()\n\t\t\tlineNumber = 1\n\t\t\tfor line in lines:\n\t\t\t\tif __search(findwhat, line, ignorecase, regexp):\n\t\t\t\t\tresult.append((filename, lineNumber, line.strip()))\n\t\t\t\tlineNumber += 1\n\t\treturn result", "def read_corpus(file_path, source):\n data = []\n for line in open(file_path, encoding='utf-8'):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>']\n data.append(sent)\n\n return data", "def select_regions(args):\n assert args.files, \"Need a set of fastq files\"\n assert args.out, \"Need --out\"\n region = os.path.abspath(args.region)\n workdir = 'select'\n safe_makedir(workdir)\n out_file = os.path.join(workdir, splitext_plus(args.out)[0] + \"_cpg.bed\")\n out_snp_file = os.path.join(workdir, splitext_plus(args.out)[0] + '_snp.bed')\n if not file_exists(out_file):\n with file_transaction(out_file) as tx_out:\n with open(tx_out, 'w') as out_handle:\n # print >> out_handle, \"chrom\\tstart\\tend\\tcu\\tcm\\tstrand\\tgene\\tsample\"\n for in_vcf in args.files:\n snp_file = in_vcf.replace(\"rawcpg\", \"rawsnp\")\n sample = splitext_plus(os.path.basename(in_vcf))[0].split(\"_\")[0]\n get_het(snp_file, region, sample, out_snp_file)\n res = pybedtools.BedTool(in_vcf).intersect(b=region, wo=True)\n # cmd = (\"bedtools intersect -u -a {in_vcf} -b {region} > {tx_tmp}\")\n # do.run(cmd.format(**locals()), \"selecting %s\" % in_vcf)\n\n for record in res:\n gene = record[-2]\n chrom, pos, info, header, frmt = record[0], int(record[1]), record[7], record[8], record[9]\n cs = info.split(';')[0].split('=')[1]\n frmt = dict(zip(header.split(\":\"), frmt.split(':')))\n if is_good_cpg(frmt):\n tag = \"%s-%s-%s-%s\" % (frmt['CU'], frmt['CM'], gene, sample)\n print >> out_handle, \"%s\\t%s\\t%s\\t%s\\t.\\t%s\" % (chrom, pos, pos + 1, tag, cs)", "def main():\n if len(sys.argv) >= 2:\n filename = sys.argv[1]\n else:\n print 'usage: ./Osmos.py file'\n sys.exit(1)\n with open(filename, 'rU') as file_handle:\n casenum = int(file_handle.readline())\n for case in range(1, casenum + 1):\n print handle_case(case, [file_handle.readline() for x in range(2)])", "def crawl(self, para_file):\n if not self.writer and not self.method:\n return\n fpara = open(para_file, 'r')\n\n pool = threadpool.ThreadPool(self.poolsize)\n parlst = list()\n for line in fpara:\n if self.stopped:\n break # Stop current crawling\n parlst.append(line.strip())\n if len(parlst) > 10:\n requests = threadpool.makeRequests(self.retrieve, parlst)\n map(pool.putRequest, requests)\n pool.wait()\n self.writer.flush()\n del parlst[:]\n\n #Flush the last part of lines in parlst\n if not self.stopped:\n requests = threadpool.makeRequests(self.retrieve, parlst)\n map(pool.putRequest, requests)\n pool.wait()\n self.writer.flush()\n\n fpara.close()\n self.writer.close()\n if not self.stopped:\n logging.info('Retrieving finished.')\n else:\n logging.info('Retrieving interrupted.')\n return", "def process(self):\n\n linelang = 
defaultdict(int)\n wordlang = defaultdict(int)\n\n linefont = defaultdict(int)\n wordfont = defaultdict(int)\n\n inputfiles = self.input_files\n for input_file in inputfiles:\n\n alignurl = input_file.url\n pcgts = parse(alignurl, True)\n page = pcgts.get_Page()\n regions = page.get_TextRegion()\n\n for region in regions:\n lines = region.get_TextLine()\n\n for line in lines:\n try:\n llang = line.primaryLanguage\n linelang[llang] += 1\n except TypeError:\n pass\n\n try:\n lfont = line.fontFamily\n linefont[lfont] += 1\n except TypeError:\n pass\n\n words = line.get_Word()\n for word in words:\n try:\n wlang = word.language\n wordlang[wlang] += 1\n except TypeError:\n pass\n\n try:\n wfont = word.get_TextStyle().fontFamily\n wordfont[wfont] += 1\n except TypeError:\n pass\n\n #predominant language\n try:\n lang = max(linelang, key=lambda k: linelang[k])\n except TypeError:\n try:\n lang = max(wordlang, key=lambda k: wordlang[k])\n except TypeError:\n lang = 'German'\n\n #predominant font\n try:\n font = max(linefont, key=lambda k: linefont[k])\n except TypeError:\n try:\n font = max(wordfont, key=lambda k: wordfont[k])\n except TypeError:\n font = 'Antiqua'\n\n\n print(lang)\n print(font)", "def ReturnChain(currentFile):\n with open(currentFile) as fileIn:\n lines = fileIn.readlines()\n Chainlist=[]\n for line in lines:\n if line.startswith('SEQRES'):\n List = line.split()\n Chainlist.append(List[2])\n #print(Chainlist)\n Chain = set(Chainlist)\n chain = sorted(list(Chain))\n return chain", "def input_file_parser(cls):\n \n # Loop through the file and store lines in an appropriate list that is passed to other class functions\n with open(cls.infile_name,'r') as infile:\n for line in infile: # Loop through the whole file\n if '$molecule' in line: # Search for a section header\n for line in infile: # Enter second loop over the lines in the section\n if '$end' in line: # If you find $end, stop loop as the section is finished\n break\n else: # Otherwise add the line to a list\n cls.molecule_lines.append(line.strip())\n if '$connection' in line: # Continue for other sections...\n for line in infile:\n if '$end' in line:\n break\n else:\n cls.connector_lines.append(line.strip())\n if '$options' in line: # Continue for other sections...\n for line in infile:\n if '$end' in line:\n break\n else:\n cls.options_lines.append(line.strip())\n\n return None", "def get(self, query_text):\n\n with open(FILEPATH, encoding='utf-8') as f:\n lines = f.readlines()\n\n new_search_result = models.SearchResult(query_text=query_text)\n\n occurrence_object_list = []\n\n for line in lines:\n line_index = lines.index(line)\n\n for m in re.finditer(re.escape(query_text), line, re.M|re.I):\n\n text_start = m.start()\n text_end = m.end()\n\n #Initial params for second part of sentence\n second_part = ''\n boundary_index = None\n line_count = 1\n search_line = line[text_start:].replace('\"', \"'\")\n\n #intial params for first part of sentence\n first_part = ''\n boundary_index_rev = None\n line_count_rev = -1\n search_line_rev = line[:text_start].replace('\"', \"'\")\n\n while boundary_index == None or boundary_index_rev == None:\n # Forward Scan of query_text sentence until punctuation or \\n\n if boundary_index == None:\n if (\".\" not in search_line and\n \"?\" not in search_line and\n \"!\" not in search_line):\n\n second_part += search_line\n try:\n search_line = lines[line_index\n + line_count].replace('\"', \"'\")\n except IndexError:\n boundary_index = search_line.index(\n search_line[-1]\n )\n else:\n if 
search_line == \"\\n\":\n boundary_index = lines[line_index +\n line_count -1].index(\"\\n\")\n\n line_count += 1\n else:\n for punc in (\".\", \"!\", \"?\"):\n try:\n boundary_index = search_line.index(punc)\n except ValueError:\n continue\n try:\n #If last word is in quotes, grab quote after period\n if search_line[boundary_index + 1] == \"'\":\n add_quote_index = 2\n else:\n add_quote_index = 1\n except IndexError:\n add_quote_index = 0\n second_part += search_line[:boundary_index\n + add_quote_index]\n\n # Backwards Scan of query_text sentence until punctuation or \\n\n if boundary_index_rev == None:\n if (\".\" not in search_line_rev and\n \"?\" not in search_line_rev and\n \"!\" not in search_line_rev):\n first_part = search_line_rev + first_part\n\n if search_line_rev == \"\\n\":\n boundary_index_rev = search_line_rev.index(\"\\n\")\n\n elif line_index + line_count_rev >= 0:\n search_line_rev = lines[line_index\n + line_count_rev].replace('\"', \"'\")\n line_count_rev -= 1\n else:\n boundary_index_rev = search_line_rev.index(\n search_line_rev[0]\n )\n else:\n for punc in (\".\", \"!\", \"?\"):\n try:\n boundary_index_rev = search_line_rev.rindex(\n punc)\n except ValueError:\n continue\n first_part = (search_line_rev[boundary_index_rev+1:]\n + first_part)\n\n sentence = (first_part + second_part).replace('\\n', ' ').strip()\n\n occurrence_object_list.append(\n models.Occurrence(\n search_result = new_search_result,\n line = line_index + 1,\n start = text_start + 1,\n end = text_end + 1,\n in_sentence = sentence\n )\n )\n\n #Add occurrences to SearchResult\n setattr(new_search_result, 'occurrences', occurrence_object_list)\n new_search_result.set_num_of_occurrences()\n response = marshal(new_search_result, search_fields)\n return jsonify(response)", "def findCategory( _file, _treeName, _conditioned = None ):\n\ttree = _file.Get(_treeName)\n\t#Name of the EDAnalyzer\n\tedmAnName = _treeName.split('/')[0]\n\t#Get MetaData \n\tmetadata = tree.GetUserInfo()[0].GetString().Data()\n\t# Isolating the EDAnalyzer with its parameters\n\tparameters = metadata[metadata.find(edmAnName+':'):]\n\tparameters = searchEndBlock( parameters )\n\t# Getting the categories\n\tcatStr = parameters[parameters.find('flags:'):]\n\tcatStr = searchEndBlock( catStr )\n\tcategories = []\n\ttriggerCat = []\n\tfor i in catStr.split('\\n'):\n\t\t# Pairing Triggers with the rest of categories \n\t\tif i.find('triggerObjectMatches') != -1:\n\t\t\ttriggerCat.append( i.split(':')[0].strip(' ') ) \n\t\telif i.find('string tracked') != -1 or i.find('InputTag tracked') != -1:\n\t\t\tcategories.append( i.split(':')[0].strip(' ') )\n\t# Checking if the conditional category is in the file\n\tif _conditioned:\n\t\tif _conditioned not in categories:\n\t\t\tmessage = \"\"\"\\033[1;33mWarning: The conditional category %s is not in the tree %s \\033[1;m\"\"\" % (_treeName,_conditioned)\n\t\t\t_conditioned = None\n\t\t\tprint message\n\t\telse:\n\t\t\tcategories.remove( _conditioned )\n\t\t\t#Adding the conditional category\t\t\t\n\t\t\tfor i in xrange(len(categories)):\n\t\t\t\tcategories[i] = _conditioned+':'+categories[i]\n\t#Add the trigger to build categories with to checks\n\tdeliverCat = None\n\tif len(triggerCat) == 0:\n\t\tif _conditioned:\n\t\t\tcategoriesSet = set(map( lambda x: x.split(':')[:-1][0], categories ))\n\t\t\tcategories = list(categoriesSet)\n\t\telse:\n\t\t\tcategories = [PLAIN]\n\t\tdeliverCat = categories\n\telse:\n\t\tdeliverCat = []\n\t\tfor cat in categories:\n\t\t\tdeliverCat.append( cat )\n\t\n\treturn 
deliverCat", "def handle(self):\n\n # We initiate the source we are going to parse to the Generate class.\n source = \"URL\"\n\n if self.catched.lower() not in PyFunceble.STATUS[\"list\"][\"invalid\"]:\n # The parsed status is not in the list of invalid.\n\n # We generate the status file with the catched status.\n Generate(self.catched, source).status_file()\n else:\n # The parsed status is in the list of invalid.\n\n # We generate the status file with the parsed status.\n Generate(self.catched, \"SYNTAX\").status_file()\n\n # We return the parsed status.\n return self.catched", "def crunch(self):\n while True:\n lst = self.want_line(r'\\s*\\.file\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.globl\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.ident\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.section\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.type\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.size\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(bss)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(data)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(text)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n break\n if osarch_is_amd64():\n self.crunch_amd64(lst)\n elif osarch_is_ia32():\n self.crunch_ia32(lst)\n self.__tag = None", "def main():\n filename = input()\n # Catching whether the file could be opened or not.\n try:\n file = open(filename)\n except:\n print (\"ERROR: Could not open file \" + filename)\n exit(1)\n data = file.read().split(\"\\n\")\n file.close()\n conference_set = ConferenceSet()\n # Skips lines that are empty or comment lines that begin with a # symbol.\n for line in data:\n if line != \"\":\n if line[0][0] != \"#\":\n team = Team(line)\n conference_set.add(team)\n conference_set.best()", "def search_in(self, file_object):\n for line_num, line in enumerate(file_object.readlines()):\n line = line.replace(\"\\n\", \"\").replace(\"\\r\", \"\") # remove new line char\n if re.match(self.regex, line):\n result = f\"~{os.path.abspath(file_object.name)}: {line} (line {line_num})\"\n if self.colored:\n result = self.highlight_phrase(result)\n print(result, file=sys.stdout)", "def main():\n parser = argparse.ArgumentParser(description='investigate code health and random statistics')\n sub_parsers = parser.add_subparsers(dest='command_name', title='Commands', help='', metavar='<command>')\n\n sub = sub_parsers.add_parser('line-count', help='list line counts')\n sub.add_argument('files', nargs='+', help='files or folders to look in')\n sub.add_argument('--each', type=int, default=1)\n sub.add_argument('--show', action='store_true')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_line_count)\n\n sub = sub_parsers.add_parser('include-list', help='list headers from files')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--print', dest='print_files', action='store_true')\n sub.add_argument('--print-stats', dest='print_stats', action='store_true')\n sub.add_argument('--print-max', dest='print_max', action='store_true')\n sub.add_argument('--no-list', dest='print_list', action='store_false')\n sub.add_argument('--count', default=2, type=int, help=\"only print includes that are more or equal to 
<count>\")\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.set_defaults(func=handle_list)\n\n sub = sub_parsers.add_parser('include-gv', help='generate a graphviz of the includes')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.add_argument('--group', action='store_true', help=\"group output\")\n sub.add_argument('--cluster', action='store_true', help=\"group output into clusters\")\n sub.set_defaults(func=handle_gv)\n\n sub = sub_parsers.add_parser('list-indents', help='list the files with the maximum indents')\n sub.add_argument('files', nargs='+')\n sub.add_argument('--each', type=int, default=1, help='group counts')\n sub.add_argument('--show', action='store_true', help='include files in list')\n sub.add_argument('--hist', action='store_true', help='show simple histogram')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_list_indents)\n\n sub = sub_parsers.add_parser('missing-pragma-once', help='find headers with missing include guards')\n sub.add_argument('files', nargs='+')\n sub.set_defaults(func=handle_missing_include_guards)\n\n sub = sub_parsers.add_parser('missing-in-cmake', help='find files that existis on disk but missing in cmake')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_missing_in_cmake)\n\n sub = sub_parsers.add_parser('list-no-project-folders', help='find projects that have not set the solution folder')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_list_no_project_folder)\n\n sub = sub_parsers.add_parser('check-files', help=\"find files that doesn't match the name style\")\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_check_files)\n\n args = parser.parse_args()\n if args.command_name is not None:\n args.func(args)\n else:\n parser.print_help()", "def read_conllx(\n self,\n input_data: List[str],\n merge_subtoken: bool,\n\n )-> Generator[Doc]:\n for sent in input_data.strip().split(\"\\n\\n\"):\n lines = sent.strip().split(\"\\n\")\n if lines:\n while lines[0].startswith(\"#\"):\n lines.pop(0)\n example = self._example_from_conllu_sentence(\n lines,\n merge_subtoken=merge_subtoken,\n )\n yield example", "def lookup(collated_file,query_file):\r\n x=open(query_file,\"r\")\r\n query=[]\r\n for i in x:\r\n i=i.replace(\"\\n\",\"\")\r\n query.append(i)\r\n y=open(collated_file,\"r\")\r\n collection=[]\r\n for i in y :\r\n i=i.replace(\"\\n\",\"\")\r\n i=i.split(\":\")\r\n collection.append(i)\r\n answer=[]\r\n for i in range(len(query)):\r\n answer.append(BinarySearch(collection,0,len(collection)-1,query[i]))\r\n y = open(\"song_ids.txt\", \"w\")\r\n for i in range(len(answer)):\r\n y.write(str(answer[i]) + \"\\n\")", "def Parse(filename: str) -> List[op.Operation]:\n\n # Open File\n file = open(filename, \"r\")\n\n # Get real file name\n index = filename.rfind(\"/\")\n if index == -1:\n index = filename.rfind(\"\\\\\")\n if index == -1:\n activeFile = filename\n else:\n activeFile = filename[index + 1:len(filename)]\n activeFileName = activeFile.split(sep=\".\")[0]\n\n print(f\"Parsing {activeFile}\")\n\n # Multi threading\n pool = ProcessPoolExecutor()\n futures = []\n\n lines = file.readlines()\n\n # start Threads\n lineNumber = 0\n for line in lines:\n futures.append(pool.submit(_ParseLine, line,\n 
lineNumber, activeFileName))\n lineNumber += 1\n\n wait(futures)\n successfullyParsed = []\n invalidCounter = 0\n commentCounter = 0\n # Put results in list\n for future in futures:\n result = future.result()\n # Remove invalid lines\n if isinstance(result, op.Invalid):\n invalidCounter += 1\n continue\n # Remove comments\n if isinstance(result, op.Comment):\n commentCounter += 1\n continue\n successfullyParsed.append(result)\n\n # Print for Debug\n if commentCounter > 0:\n print(f\"Ignoring {commentCounter} comments\")\n if invalidCounter > 0:\n print(Fore.YELLOW + f\"WARNING: {invalidCounter} invalid lines\")\n\n # Close File\n file.close()\n\n return successfullyParsed", "def analyze_files(proj_files, patterns):\n sec_in_projects = defaultdict(list)\n counter = 0\n for project, files in proj_files.items():\n\n if counter % 1000 == 0:\n print('Progress: {:.2%}'.format(counter/len(proj_files)))\n counter += 1\n\n for file in files:\n with open(file) as infile:\n content = infile.read()\n\n for _, tools in patterns.items():\n\n for tool, details in tools.items():\n sec = False\n for pattern in details['Patterns']:\n if pattern.lower() in content.lower():\n sec = True\n if sec:\n sec_in_projects[project].append(tool)\n for project, tools in sec_in_projects.items():\n sec_in_projects[project] = list(set(tools))\n return sec_in_projects", "def data_treatment(file_name):\n input_f = open(file_name, \"r\") # Read the file\n out_f = open(file_name[:-4]+\".out\",\"w\") # Create the output file\n cars = [] # Create a list where the car class will be stored\n for line in input_f:\n param = line_treatment(line) # Read the parameters per line\n car = Car(param[0], param[1]) # Create a class Car with the parameters\n check_availability(car) \n cars.append(car) # Append the car to the list of cars\n print (car, file=out_f) # Print in the output file\n input_f.close() # Close input file\n out_f.close() # Close output file\n return cars", "def build_filterset():\n with open(config.COMID_REFERENCE) as fil:\n return {int(line.strip()) for line in fil}", "def parse_BC5CDR(kb_data):\n\n print(\"Parsing BC5CDR corpus...\")\n output_BC5CDR = dict()\n\n if kb_data.kb == \"medic\":\n entity_type = \"Disease\"\n \n elif kb_data.kb == \"ctd_chemicals\":\n entity_type = \"Chemical\"\n\n corpus_dir = \"./retrieved_data/corpora/BioCreative-V-CDR-Corpus/CDR_Data/CDR.Corpus.v010516/\" \n filenames = [\"CDR_TrainingSet.PubTator.txt\", \"CDR_DevelopmentSet.PubTator.txt\", \"CDR_TestSet.PubTator.txt\"]\n\n for filename in filenames:\n with open(corpus_dir + filename, 'r', encoding=\"utf-8\") as corpus_file:\n data = corpus_file.readlines()\n corpus_file.close()\n\n for line in data:\n line_data = line.split(\"\\t\")\n file_id = line_data[0]\n \n if len(line_data) == 6 and line_data[4] == entity_type:\n mesh_id = \"MESH:\" + line_data[5].strip(\"\\n\") \n \n if mesh_id in kb_data.child_to_parent.keys():\n direct_ancestor = \"https://id.nlm.nih.gov/mesh/\" \\\n + kb_data.child_to_parent[mesh_id].strip(\"MESH:\")\n update_mesh_id = \"https://id.nlm.nih.gov/mesh/\" + line_data[5].strip(\"MESH:\").strip(\"\\n\")\n annotation = (line_data[3], line_data[1], line_data[2], update_mesh_id, direct_ancestor)\n output_BC5CDR = add_annotation_to_output_dict(file_id, annotation, output_BC5CDR)\n\n print(\"...Done!\")\n return output_BC5CDR", "def get_cities() -> list:\n results = []\n with open('src/craigslist_cities.txt', 'r', encoding='utf8') as file:\n for line in file:\n results.append(line.strip())\n return results", "def 
find_text_in_file(file_path, start_text, end_text):\r\n\r\n pkg_file=file(file_path,'r') \r\n for row in pkg_file: \r\n pid = find_text_in_string(row, start_text, end_text)\r\n if pid != None:\r\n pkg_file.close()\r\n return pid\r\n\r\n pkg_file.close() \r\n return None" ]
[ "0.65061325", "0.5781993", "0.5781708", "0.56171685", "0.55821574", "0.55591077", "0.55106205", "0.5485491", "0.5484373", "0.5472426", "0.54602283", "0.54551107", "0.5445359", "0.5434916", "0.5391505", "0.5335677", "0.53314865", "0.53281003", "0.5295112", "0.52804834", "0.52786887", "0.5269188", "0.5259035", "0.52532804", "0.52441585", "0.5222865", "0.5203549", "0.5193858", "0.51710933", "0.51574117", "0.5153334", "0.5153221", "0.51435953", "0.5132586", "0.5085911", "0.50715387", "0.5069625", "0.5069318", "0.5064177", "0.5037598", "0.5034049", "0.5030845", "0.5030078", "0.50283605", "0.5027081", "0.5025443", "0.5016835", "0.50139976", "0.499661", "0.4993736", "0.49926314", "0.49923038", "0.49808663", "0.49784294", "0.49722305", "0.4964027", "0.49541107", "0.4944549", "0.4941979", "0.49396354", "0.4933179", "0.49215272", "0.4917679", "0.49021903", "0.48983118", "0.4891663", "0.48892733", "0.48795658", "0.48780438", "0.48755047", "0.48746544", "0.48595604", "0.48576716", "0.48538187", "0.48532733", "0.48470443", "0.48327094", "0.48277995", "0.48048872", "0.4801052", "0.4799959", "0.47976837", "0.47947767", "0.4787378", "0.47861356", "0.47848964", "0.47824147", "0.47820675", "0.47815168", "0.47808707", "0.47794732", "0.47768334", "0.47743192", "0.477393", "0.47644764", "0.47640806", "0.4761653", "0.47600847", "0.47576872", "0.47469944" ]
0.6170222
1
Create sinusoidal timestep embeddings.
def timestep_embedding(timesteps, dim, max_period=10000):
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
    ).to(device=timesteps.device)
    args = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
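A minimal usage sketch for the function above, assuming it is in scope along with the math and torch imports it relies on; the batch of 8 timesteps and dim=128 are illustrative choices.

import math
import torch

# Embed 8 integer diffusion timesteps into 128-dimensional sinusoidal vectors.
timesteps = torch.arange(8)                  # steps 0..7
emb = timestep_embedding(timesteps, dim=128)
print(emb.shape)                             # torch.Size([8, 128]); first half cosine terms, second half sine terms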
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timestep_embedding(timesteps, dim, max_period=10000):\n half = dim // 2\n freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=half, dtype=paddle.float32) / half)\n args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]\n embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)\n if dim % 2:\n embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:, :1])], axis=-1)\n return embedding", "def timestep_embedding(timesteps, dim, max_period=10000):\n half = dim // 2\n freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n return embedding", "def generate_time_series(num_points, num_dims):\n phase = np.random.randn()\n period = np.random.uniform()\n times = np.linspace(0, 10, num_dims)\n scale = np.random.exponential(size=num_points)\n return np.outer(scale, np.sin(times / period + phase))", "def get_timestep_embedding(timesteps, embedding_dim=128):\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float, device=timesteps.device) * -emb)\n\n emb = timesteps.float() * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)\n if embedding_dim % 2 == 1: # zero pad\n emb = F.pad(emb, [0,1])\n\n return emb", "def _initialize_embeddings(self):\n with tf.variable_scope(self.scope):\n init_temporal_s = np.sqrt(\n 6. / (self._config.nact_dict[\"num_s\"] + self._config.ndim_emb + 1))\n\n self.w_dt = tf.get_variable(\n name=\"w_dt\",\n shape=[1, self._config.ndim_emb],\n initializer=tf.initializers.random_uniform(\n -init_temporal_s, init_temporal_s))\n\n if self._config.embedding_type not in self._embedding_classes:\n raise ValueError(\n f\"Unknown embedding type: {self._config.embedding_type}.\")\n self.embedding = self._embedding_classes[self._config.embedding_type](\n self._config, self._embed_dim_dict)", "def morletft(s, w, w0, dt):\n \n p = 0.75112554446494251 # pi**(-1.0/4.0)\n wavelet = np.zeros((s.shape[0], w.shape[0]))\n pos = w > 0\n\n for i in range(s.shape[0]):\n n = normalization(s[i], dt)\n wavelet[i][pos] = n * p * np.exp(-(s[i] * w[pos] - w0)**2 / 2.0)\n \n return wavelet", "def embed_seq(self,X_seq,Y_seq):\n X_embed = -tr.ones(len(X_seq),self.og_signal_dim+self.og_noise_dim)\n # find trials of corresponding types\n pm_trials_bool = X_seq >= self.ntokens_og\n pm_trials = np.where(pm_trials_bool)\n og_trials = np.where(np.logical_not(pm_trials_bool))\n # take signal_dim (time,edim_signal_dim)\n pm_embeds = self.emat_pm[X_seq[pm_trials] - self.ntokens_og] \n og_embeds = self.emat_og[X_seq[og_trials]] \n # make noise (time,edim_noise)\n pm_noise = tr_noise_pm([len(pm_embeds),self.pm_noise_dim])\n og_noise = tr_noise_og([len(og_embeds),self.og_noise_dim])\n # cat signal_dim and noise (time,edim)\n pm_embeds = tr.cat([pm_embeds,pm_noise],-1)\n og_embeds = tr.cat([og_noise,og_embeds],-1)\n # put into respective positions\n X_embed[pm_trials] = pm_embeds\n X_embed[og_trials] = og_embeds \n # include batch dim \n X_embed = tr.unsqueeze(X_embed,1)\n Y_embed = tr.unsqueeze(tr.LongTensor(Y_seq),1)\n return X_embed,Y_embed", "def context(self, velocity, sini, nangles, angles, stokesi_mn, stokesi_residual, T_max, wavelength):\n \n n_batch, n_steps, n_lambda = stokesi_residual.shape \n\n # 
Clone the mean spectrum for all timesteps and flatten batch+time to apply the encoder for all timesteps and batch\n tmp_wavelength = wavelength[:, None, :].expand(-1, n_steps, n_lambda).reshape(-1, n_lambda).unsqueeze(-1)\n tmp_mn = stokesi_mn[:, None, :].expand(-1, n_steps, n_lambda).reshape(-1, n_lambda).unsqueeze(-1)\n tmp_residual = stokesi_residual.reshape(-1, n_lambda).unsqueeze(-1)\n \n # Serialize all Stokes parameters for all time steps and treat wavelengths as a sequence\n tmp_stokes = torch.cat([tmp_wavelength, tmp_mn, tmp_residual], dim=-1)\n\n # Compute masks, both spectral and in time\n mask_spectral = (tmp_mn[:, :, 0] == 0.0).unsqueeze(1).unsqueeze(2)\n mask_time = (angles != -999).unsqueeze(-1)\n\n # First embedding\n # [B*T, S, 3] -> [B*T, S, n_emb]\n out = self.StokesEmbedding(tmp_stokes)\n \n # First Transformer Encoder to encode spectral information\n # The mask needs to indicate with False those spectral points to attend to\n # [B*T, S, n_emb] -> [B*T, S, n_emb]\n out = self.SpectralEncoder(out, mask_spectral)\n \n # [B*T, S, n_emb] -> [B*T, n_emb] -> [B, T, n_emb]\n out = torch.mean(out, dim=1).reshape((n_batch, n_steps, -1))\n\n # Now we mask all unavailable times\n out = out.masked_fill(mask_time == 0, 0.0)\n \n # Add an embedding based on the phase angle using FiLM\n film_angles_gamma, film_angles_beta = self.film_angles(angles[:, :, None])\n film_angles_gamma = film_angles_gamma.masked_fill(mask_time == 0, 0.0)\n film_angles_beta = film_angles_beta.masked_fill(mask_time == 0, 0.0)\n\n out = out * film_angles_gamma + film_angles_beta\n \n # Second Transformer Encoder to encode time information\n # It produce a unique latent vector by attending to all timesteps\n # We apply a mask to only attend to the time steps in each element of the batch\n # The mask needs to indicate with False those spectral points to attend to\n # [B, T, n_emb] -> [B, T, n_emb] \n out = self.TimeEncoder(out, (~mask_time[:, :, 0]).unsqueeze(1).unsqueeze(2))\n\n # Mask all unavailable times\n out = out.masked_fill(mask_time == 0, 0.0)\n\n # Create a unique context vector by adding all times \n context = torch.sum(out, dim=1) / nangles[:, None]\n \n # Add a conditioning using FiLM for velocity and sini\n tmp = torch.cat([velocity[:, None], sini[:, None]], dim=-1)\n film_rotation_gamma, film_rotation_beta = self.film_rotation(tmp)\n \n context = context * film_rotation_gamma + film_rotation_beta\n\n return context", "def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")", "def build_seq_embeddings(self):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.word_embedding_size],\n initializer=self.initializer)\n \n # We need to store the normalized lookup table for efficient mapping of embedding vectors to closest words\n self.normed_embedding_map = tf.nn.l2_normalize(embedding_map, dim=1)\n \n seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) \n # seq_embeddings has the shape (batch_size, sequence_length, sentence_length, embedding_size)\n # meaning, for each index in input_seqs (with shape (batch_size, sequence_length, sentence_length)) it stores an embedding vector\n\n #print('Shape seq_embeddings: ' + 
str(seq_embeddings.get_shape()))\n\n self.seq_embeddings = seq_embeddings", "def build_lstm8(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False,\n name='eembed'\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False, name='td8')))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi'))\n model.add(Flatten(name='flaaten'))\n model.add(BatchNormalization())\n model.add(Dropout(settings['dropout'] / 2.0))\n model.add(Dense(shape['n_class'], activation='sigmoid'))\n xprint('build_lstm8: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model", "def impulse_data(sample_rate=512,psd_segment_length=60):\n epoch = 1153742417.0\n ts_data = numpy.zeros(sample_rate * psd_segment_length)\n ts_data = types.TimeSeries(ts_data, delta_t=1.0/sample_rate, epoch=epoch)\n return ts_data", "def _add_seq2seq(self):\n hps = self._hps\n vsize = self._vocab.size() # size of the vocabulary\n \n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)\n\n\n with tf.variable_scope('embedding'):\n if hps.pretrained_embeddings:\n word2vec = load_embeddings(hps.embeddings_path, self._vocab.word2id, hps.rand_unif_init_mag)\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=tf.constant_initializer(word2vec))\n # self.assign_embedding = tf.assign(self.embedding, word2vec)\n else:\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n if hps.mode==\"train\": self._add_emb_vis(self.embedding) # add to tensorboard\n\n # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_enc_inputs = tf.nn.embedding_lookup(self.embedding, self._enc_batch)\n if self._hps.hier:\n enc_batch_sections = tf.unstack(self._enc_batch_sections, axis=1)\n sec_emb_enc_inputs = [tf.nn.embedding_lookup(self.embedding, section)\n for section in enc_batch_sections]\n # list length max_dec_steps containing shape (batch_size, emb_size)\n emb_dec_inputs = [tf.nn.embedding_lookup(self.embedding, x)\n for x in tf.unstack(self._dec_batch, axis=1)]\n\n\n # Hierarchical attention model\n if self._hps.hier:\n with tf.variable_scope('encoder'), tf.device(self._next_device()):\n sec_enc_outs = []\n states_fw = []\n states_bw = []\n states = []\n\n # level 1, encode words to sections\n with tf.variable_scope(\"word_level_encoder\", reuse=tf.AUTO_REUSE) as scope:\n encoder_outputs_words = []\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n fw_st, bw_st = None, None\n if self._hps.use_do: # DropOut\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n for i in range(self._hps.num_sections):\n encoder_tmp_output, (fw_st, bw_st) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, inputs=sec_emb_enc_inputs[i], dtype=tf.float32,\n 
sequence_length=self._batch_sections_len[:,i], swap_memory=True, initial_state_bw=bw_st, initial_state_fw=fw_st)\n # concatenate the forwards and backwards states\n encoder_tmp_output = tf.concat(axis=2, values=encoder_tmp_output) #shape=[batch x seq_len x hidden_size]\n \n encoder_outputs_words.append(encoder_tmp_output)\n # instead of concating the fw and bw states, we use a ff network\n combined_state = self._reduce_states(fw_st, bw_st)\n states.append(combined_state)\n scope.reuse_variables()\n \n # level 2, encode sections to doc\n encoder_outputs_words = tf.stack(encoder_outputs_words, axis=1) # shape [batch x num_sections x seq_len x hidden_size]\n shapes = encoder_outputs_words.shape\n encoder_outputs_words = tf.reshape(encoder_outputs_words, (shapes[0].value, -1, shapes[-1].value)) #shape=[batch x (seq_len * num_sections) x hidden_size]\n\n doc_sections_h = tf.stack([s.h for s in states], axis=1) # [batch x num_sections x hidden_size]\n doc_sections_c = tf.stack([s.c for s in states], axis=1) # [batch x num_sections x hidden_size]\n\n with tf.variable_scope(\"section_level_encoder\"):\n if FLAGS.section_level_encoder == 'RNN':\n cell_fw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do:\n cell_fw_1 = tf.contrib.rnn.DropoutWrapper(cell_fw_1, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw_1 = tf.contrib.rnn.DropoutWrapper(cell_bw_1, output_keep_prob=1.0 - self._hps.do_prob)\n encoder_output_sections, (fw_st_2, bw_st_2) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw_1, cell_bw_1, inputs=doc_sections_h, sequence_length=self._doc_sec_lens, dtype=tf.float32, swap_memory=True)\n encoder_output_sections = tf.concat(axis=2, values=encoder_output_sections)\n doc_sections_state = self._reduce_states(fw_st_2, bw_st_2)\n else:\n if FLAGS.section_level_encoder == 'AVG': # average section cells\n doc_sections_state_h = tf.reduce_mean(doc_sections_h, axis=1)\n doc_sections_state_c = tf.reduce_mean(doc_sections_c, axis=1)\n elif FLAGS.section_level_encoder == 'FF': # use a feedforward network to combine section cells\n doc_sections_state_h = tf.reshape([doc_sections_h.shape[0].eval(), -1])\n doc_sections_state_h = tf.layers.dense(\n inputs=doc_sections_state_h,\n units=self._hps.hidden,\n activation=tf.nn.relu) \n doc_sections_state_c = tf.reshape([doc_sections_c.shape[0].eval(), -1])\n doc_sections_state_c = tf.layers.dense(\n inputs=doc_sections_state_c,\n units=self._hps.hidden,\n activation=tf.nn.relu)\n else:\n raise AttributeError('FLAGS.section_level_encoder={} is not a valid option'.format(FLAGS.section_level_encoder))\n doc_sections_state = tf.contrib.rnn.LSTMStateTuple(doc_sections_state_c, doc_sections_state_h)\n encoder_output_sections = doc_sections_h \n \n elif not self._hps.multi_layer_encoder:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n # concatenate the forwards and backwards states\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs)\n \n 
# stack n layers of lstms for encoder\n elif self._hps.multi_layer_encoder:\n # TODO: check\n for layer_i in xrange(self._hps.enc_layers):\n with tf.variable_scope('encoder%d'%layer_i), tf.device(\n self._next_device()):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do: # add dropout\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n emb_enc_inputs, (fw_st, bw_st) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n emb_enc_inputs = tf.concat(axis=2, values=emb_enc_inputs)\n encoder_outputs = emb_enc_inputs\n \n if self._hps.hier:\n self._enc_sec_states = encoder_output_sections\n self._enc_states = encoder_outputs_words \n else:\n self._enc_states = encoder_outputs\n self._enc_sec_states = None\n \n # convert the encoder bidirectional hidden state to the decoder state\n # (unidirectional) by an MLP\n if self._hps.hier:\n self._dec_in_state = doc_sections_state\n else:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n self._dec_in_state = self._reduce_states(fw_st, bw_st) \n \n # Add the decoder\n\n with tf.variable_scope('decoder'), tf.device(self._next_device()):\n cell = tf.contrib.rnn.LSTMCell(\n self._hps.hidden_dim,\n state_is_tuple=True,\n initializer=self.rand_unif_init)\n \n # We need to pass in the previous step's coverage vector each time\n prev_coverage = self.prev_coverage\\\n if hps.mode==\"decode\" and self._hps.coverage \\\n else None \n \n \n if self._hps.hier:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, self.attn_dists_sec =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n self._enc_sec_states,\n num_words_section=self._batch_sections_len,\n enc_padding_mask=self._enc_padding_mask,\n enc_section_padding_mask=self._enc_section_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n temperature=self._hps.temperature\n )\n \n else:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, _ =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n encoder_section_states=None,\n num_words_section=None,\n enc_padding_mask=self._enc_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n ) \n \n\n # Project decoder output to vocabulary\n with tf.variable_scope('output_projection'), tf.device(self._next_device()):\n if self._hps.output_weight_sharing:\n # share weights of embedding layer with projection\n # self.embedding is in shape [vsize, hps.emb_dim]\n w_proj = tf.get_variable('w_proj', [self._hps.emb_dim, self._hps.hidden_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n w = tf.tanh(tf.transpose(tf.matmul(self.embedding, w_proj))) # shape = [vsize, hps.hidden_dim]\n \n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n else: \n w = tf.get_variable('w', 
[self._hps.hidden_dim, vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # vocabulary score at each decoder step\n vocab_scores = []\n for i,output in enumerate(decoder_outputs):\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n vocab_scores.append(tf.nn.xw_plus_b(output, w, b)) # apply the linear layer\n\n # the final vocab distribution for each decoder time step\n # shape of each element is [batch_size, vsize]\n vocab_dists = [tf.nn.softmax(s) for s in vocab_scores] \n\n \n # pointing / generating\n if FLAGS.pointer_gen:\n final_dists = self._calc_final_dist(vocab_dists, self.attn_dists)\n# log_dists = [tf.log(dist) for dist in final_dists]\n else:\n# log_dists = [tf.log(dist) for dist in vocab_dists]\n final_dists = vocab_dists\n \n\n # Calculate Losses:\n \n if self._hps.mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'), tf.device(self._next_device()):\n if FLAGS.pointer_gen:\n # Calculate the loss per step\n # This is fiddly; we use tf.gather_nd to pick out the gold target words\n # will be list length max_dec_steps containing shape (batch_size)\n loss_per_step = [] \n batch_nums = tf.range(0, limit=hps.batch_size) # shape (batch_size)\n for dec_step, dist in enumerate(final_dists):\n # The indices of the target words. shape (batch_size)\n targets = self._target_batch[:,dec_step] \n indices = tf.stack( (batch_nums, targets), axis=1) # shape (batch_size, 2)\n # shape (batch_size). loss on this step for each batch\n gold_probs = tf.gather_nd(dist, indices)\n losses = -tf.log(gold_probs)\n loss_per_step.append(losses)\n\n # Apply dec_padding_mask mask and get loss\n self._loss = _mask_and_avg(loss_per_step, self._dec_padding_mask)\n \n\n else: # baseline model\n # this applies softmax internally\n self._loss = tf.contrib.seq2seq.sequence_loss(\n tf.stack(vocab_scores, axis=1), self._target_batch, self._dec_padding_mask) # this applies softmax internally\n\n tf.summary.scalar('loss', self._loss)\n\n # Calculate coverage loss from the attention distributions\n if self._hps.coverage:\n with tf.variable_scope('coverage_loss'):\n self._coverage_loss = _coverage_loss(self.attn_dists, self._dec_padding_mask)\n tf.summary.scalar('coverage_loss', self._coverage_loss)\n self._total_loss = self._loss + self._hps.cov_loss_wt * self._coverage_loss\n tf.summary.scalar('total_loss', self._total_loss)\n \n # ---------------------------/\n\n\n if self._hps.mode == \"decode\":\n assert len(final_dists) == 1 # final_dists is a singleton list containing shape (batch_size, extended_vsize)\n final_dists = final_dists[0]\n topk_probs, self._topk_ids = tf.nn.top_k(final_dists, hps.batch_size*2) # take the k largest probs. 
note batch_size=beam_size in decode mode\n self._topk_log_probs = tf.log(topk_probs)", "def init_embedding(size=50):\n vector = np.random.normal(0.0, 0.01, size)\n return vector", "def generate_singlesine(time = 0, samples_nb = 1000, rep_frequency = 10 , pulse_frequency = 50, amplitude = 1 , edge = 1, phase_offset = 0, noise = 0):\r\n\r\n\tif edge not in [0,1]:\r\n\t\tprint(colorama.Back.RED + colorama.Style.BRIGHT + \"ERROR: invalid phase (either 0 for a rising or a 1 for a falling edge) , exit.\"+ colorama.Style.NORMAL + colorama.Back.RESET)\r\n\t\t# Return code for error (empty input file):\r\n\t\tsys.exit(10)\r\n\r\n\r\n\t#Creating empty lists for t and y\r\n\tt = np.zeros(samples_nb)\r\n\r\n\tif noise == 0:\r\n\t\ty = np.zeros(samples_nb)\r\n\telse:\r\n\t\ty = np.random.normal(0, noise, samples_nb)\r\n\r\n\t#Determining the interval limits of t\r\n\tt_limit =1/float(rep_frequency*2)\r\n\r\n\t#Updating the t interval\r\n\tt = np.arange(-samples_nb/2,samples_nb/2)/float(samples_nb*rep_frequency) + 1/float(samples_nb*rep_frequency)\r\n\r\n\r\n\t#calculating the time_shift\r\n\t#delta_t = phase_offset/(2*np.pi*pulse_frequency)\r\n\tdelta_t = phase_offset/(2*np.pi*rep_frequency)\r\n\r\n\t#Setting the pulse amplitude\r\n\ta_pulse = amplitude\r\n\tif edge == 1:\r\n\t\ta_pulse *= -1\r\n\r\n\t#Calculating the pulse limits\r\n\tp_limit = 1/float(2*pulse_frequency)\r\n\tp_interval = list ([-p_limit,p_limit])\r\n\r\n\r\n\tfor n in range (0,len(t)) :\r\n\t\tif (t[n] + delta_t) > p_interval[0] and (t[n] + delta_t) <= p_interval[1]:\r\n\t\t\ty[n] += a_pulse * np.sin(2*np.pi*pulse_frequency*(t[n]+delta_t))\r\n\r\n\r\n\r\n\t#plt.plot(t,y)\r\n\t#plt.show()\r\n\r\n\tresult = {}\r\n\tresult ['time'] = time\r\n\tresult ['t'] = t\r\n\tresult ['y'] = y\r\n\r\n\treturn result", "def build_input_embed(self, n_input, t_input):\n n_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ntoken, self.n_embed_dim], minval=-0.05, maxval=0.05), name='n_embed_matrix')\n t_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ttoken, self.t_embed_dim], minval=-0.05, maxval=0.05), name='t_embed_matrix')\n n_input_embedding = tf.nn.embedding_lookup(n_embed_matrix, n_input)\n t_input_embedding = tf.nn.embedding_lookup(t_embed_matrix, t_input)\n return n_input_embedding, t_input_embedding", "def createTimeLapseSurvey(self, fnames_obs, fnames_sim):\n return ValueError('Not yet implemented')", "def make_ft_axis(length, dt, undersampling = 0, normalized_to_period = 0, zero_in_middle = False):\n\n if normalized_to_period == 0: \n resolution = 1 / ( 3e-5 * length * dt)\n else:\n resolution = normalized_to_period / (length * dt)\n \n array = numpy.arange((undersampling)*length/2, (undersampling+1)*length/2)*resolution\n \n if zero_in_middle == False:\n return numpy.concatenate((array,-numpy.flipud(array)))\n else:\n return numpy.concatenate((-numpy.flipud(array), array))", "def syns(alpha=0.1, rate=10, delay=0, dur=50, amp=1.0, dt=0.020, N=1, mindur = 120, makewave=True):\n deadtime = 0.7\n if dur + delay < mindur:\n tvec = np.arange(0.0, mindur , dt)\n else:\n tvec = np.arange(0.0, dur+delay , dt)\n npts = len(tvec)\n ta = np.arange(0.0, 20.0, dt)\n aw = ta * alpha* np.exp(-ta/alpha)/alpha # alpha waveform time course\n spt = [[]]*N # list of spike times\n wave = np.array([]) # waveform\n sptime=[]\n for j in range(0,N):\n done = False\n t=0.0\n nsp = 0\n while not done:\n a = np.random.sample(1)\n if t < delay:\n t = delay\n continue\n if t >= delay and t <= (delay+dur):\n ti = -np.log(a)/(rate/1000.0) # convert to 
exponential distribution with rate\n if ti < deadtime:\n continue\n t = t + ti # running time\n if t > delay+dur:\n done = True\n continue\n if nsp is 0:\n sptime = t\n nsp = nsp+1\n else:\n sptime = np.append(sptime, t)\n nsp = nsp+1\n if j is 0:\n wavej = np.zeros(len(tvec))\n for i in range(0,len(sptime)):\n st = int(sptime[i]/dt)\n wavej[st] = wavej[st] + 1\n spt[j] = sptime\n\n if makewave:\n w = np.convolve(wavej, aw/max(aw))*amp\n if len(w) < npts:\n w = np.append(w, np.zeros(npts-len(w)))\n if len(w) > npts:\n w = w[0:npts]\n if j is 0:\n wave = w\n else:\n wave = wave + w\n return (spt, wave, tvec, N)", "def create_embedding(skills):\n corpus = list(skills[\"description\"].values)\n embedder = SentenceTransformer(config[\"sentence_transformer\"][\"model\"])\n embedding = embedder.encode(corpus, show_progress_bar=True)\n return embedding", "def build_sentence_encoders(tparams, options):\n opt_ret = dict()\n trng = RandomStreams(1234)\n\n #xs, masks, sents_all = [], [], []\n in_outs = []\n\n langs = options['langs']\n for lang in langs:\n # description string: #words x #samples\n # forward\n x = tensor.matrix('x_%s'%lang, dtype='int64')\n mask = tensor.matrix('x_mask_%s'%lang, dtype='float32')\n\n n_timesteps = x.shape[0]\n n_samples = x.shape[1]\n\n # Word embedding (forward)\n emb = tparams['Wemb_%s'%lang][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])\n\n if options['bidirectional_enc']:\n # backward RNN\n x_r = x[::-1]\n mask_r = mask[::-1]\n emb_r = tparams['Wemb_%s'%lang][x_r.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])\n\n if options['use_dropout']:\n retain_probability_emb = 1-options['dropout_embedding']\n retain_probability_hidden = 1-options['dropout_hidden']\n retain_probability_source = 1-options['dropout_source']\n rec_dropout = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))\n rec_dropout_r = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))\n emb_dropout = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))\n emb_dropout_r = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))\n source_dropout = theano.shared(numpy.float32(retain_probability_source))\n emb *= source_dropout\n if options['bidirectional_enc']:\n embr *= source_dropout\n else:\n rec_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))\n rec_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n emb_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))\n emb_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n\n # Encode sentences\n if options['encoder_%s'%lang] == 'bow':\n sents = (emb * mask[:,:,None]).sum(0)\n else:\n # iteratively push input from first hidden layer until the last\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_%i'%(lang,i)\n # if first layer input are wembs, otherwise input will be output of last hidden layer\n layer_below=emb if i==0 else layer_below[0]\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None, prefix=layer_name_prefix, mask=mask,\n emb_dropout=emb_dropout, rec_dropout=rec_dropout)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj = layer_below\n\n if options['bidirectional_enc']:\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_r_%i'%(lang,i)\n # if first layer input are wembs, otherwise input will be 
output of last hidden layer\n layer_below=emb_r if i==0 else layer_below[0]\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None, prefix=layer_name_prefix, mask=mask_r,\n emb_dropout=emb_dropout_r, rec_dropout=rec_dropout_r)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj_r = layer_below\n\n # use last hidden state of forward and backward RNNs\n sents = concatenate([proj[0][-1],proj_r[0][-1]], axis=proj[0].ndim-2)\n else:\n sents = proj[0][-1]\n\n if options['use_dropout']:\n sents *= shared_dropout_layer((n_samples, options['dim']), use_noise, trng, retain_probability_hidden)\n\n # project sentences into multimodal space\n sents_mm = get_layer('ff')[1](tparams, sents, options, prefix='ff_sentence_mm', activ='linear')\n if not 'attention_type' in options or options['attention_type'] == 'dot':\n sents_mm = l2norm(sents_mm)\n\n if options['use_dropout']:\n sents_mm *= shared_dropout_layer((n_samples, options['dim_multimodal']), use_noise, trng, retain_probability_hidden)\n\n # outputs per language\n in_outs.append(([x, mask], sents_mm))\n\n return trng, in_outs", "def _multiple_stft(X, frame_length, win=None, step=None):\n n_samples = X.shape[0]\n n_features = X.shape[1]\n n_frequencies = frame_length\n \n if step is None:\n step = frame_length / 4\n elif step > frame_length:\n print \"step is longer than frame_length.\\n\"\n sys.exit()\n elif step <= 0:\n print \"step is invalid.\\n\"\n sys.exit()\n \n n_frames = int(sp.ceil(float(n_features - frame_length + step) / step))\n\n S=np.zeros([n_samples, n_frames, n_frequencies], dtype=np.complex64)\n for i in range(n_samples):\n S[i] = stft(X[i], frame_length, win, step)\n\n return S", "def generate_synthetic_embedding(d, components, spherical=True, maximum_variance=None, center=None, noise=None, lam=1.e-2):\n assert d > 1, (\"Dimensionality must be positive and bigger than 1!\", d)\n print(\"Generating embedding of size: \", d)\n\n if maximum_variance is None:\n maximum_variance = np.sqrt(d)\n\n if args.random_seed:\n np.random.set_seed(args.random_seed)\n\n emb_mu = np.random.rand(components, d) * d * 5\n emb_sigma = np.random.rand(components, d) * d # Can be unfolded column-wise because diagonal covariance matrices\n emb_sigma = np.absolute(emb_sigma) # Making the covariance matrix psd! 
(cov matrix cannot define a negative eigenvalues)\n\n print(\"Matrices are of shape: \", emb_mu.shape, emb_sigma.shape)\n\n if spherical:\n # TODO:, how to make this spherical!\n elementswise_norm = np.linalg.norm(emb_mu, ord=2, axis=1, keepdims=True)\n # print(\"elementwise norm: \", elementswise_norm)\n # emb_mu = np.divide(emb_mu, elementswise_norm)\n # emb_sigma = np.divide(emb_sigma, maximum_variance)\n\n # Create a list from this..\n\n # Finally, make the covariance matrix numerically stable for psd operations....\n\n # Conver to tensorflow tensor here...\n emb_mus = [emb_mu[i, :].reshape((1, -1)) for i in range(components)]\n emb_sigmas = [emb_sigma[i, :] * np.identity(d) + (lam * np.identity(d)) for i in range(components)]\n\n emb_mus = [tf.convert_to_tensor(x, dtype=args.dtype) for x in emb_mus]\n emb_sigmas = [tf.convert_to_tensor(x, dtype=args.dtype) for x in emb_sigmas]\n\n return emb_mus, emb_sigmas", "def make_transformer_timing_signal(inp, min_timescale=1.0, max_timescale=1e4, offset=0, inp_reverse=None):\n with tf.name_scope(\"timing_signal\"):\n ninp = tf.shape(inp)[1]\n hid_size = tf.shape(inp)[2]\n\n position = tf.to_float(tf.range(ninp))[None, :]\n\n if offset == 'random':\n BIG_LEN = 32000\n offset = tf.random_uniform(tf.shape(position), minval=-BIG_LEN, maxval=BIG_LEN, dtype=tf.int32)\n\n # force broadcasting over batch axis\n if isinstance(offset * 1, tf.Tensor): # multiply by 1 to also select variables, special generators, etc.\n assert offset.shape.ndims in (0, 1, 2)\n new_shape = [tf.shape(offset)[i] for i in range(offset.shape.ndims)]\n new_shape += [1] * (2 - len(new_shape))\n offset = tf.reshape(offset, new_shape)\n\n position += tf.to_float(offset)\n\n if inp_reverse is not None:\n position = tf.multiply(\n position,\n tf.where(\n tf.equal(inp_reverse, 0),\n tf.ones_like(inp_reverse, dtype=tf.float32),\n -1.0 * tf.ones_like(inp_reverse, dtype=tf.float32)\n )[:, None, None] # (batch_size * ninp * dim)\n )\n\n return make_sinusoid_signal(position, hid_size, min_timescale=min_timescale, max_timescale=max_timescale)", "def stft_2_waveform(stft, frame_length=512, frame_step=128, n_mel_bins=None,\n mel_lower_hertz_edge=0.0, mel_upper_hertz_edge=8000.0):\n\n if len(stft.shape) == 3:\n stft = tf.expand_dims(stft, 0)\n\n # Set the nyquist frequency to zero (the band we earlier removed).\n # This is also commonly done in these other papers.\n real = stft[:, :, :, 0]\n img = stft[:, :, :, 1]\n\n if n_mel_bins:\n real = _mel_to_linear_scale(\n real, frame_length//2, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n img = _mel_to_linear_scale(\n img, frame_length//2, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n\n real = tf.pad(real, [[0, 0], [0, 0], [0, 1]], constant_values=0)\n img = tf.pad(img, [[0, 0], [0, 0], [0, 1]], constant_values=0)\n\n stft = tf.complex(real, img)\n inv_window_fn = tf.signal.inverse_stft_window_fn(frame_step, forward_window_fn=WINDOW_FN)\n waveform = tf.signal.inverse_stft(\n stft, frame_length=frame_length, frame_step=frame_step, window_fn=inv_window_fn\n )\n return waveform", "def mk_sen_stimulus(task_info, arrays=False):\n # set seed with np - for standalone mode brian's seed() is not sufficient!\n if task_info['sim']['replicate_stim']:\n # replicated stimuli across iters\n np.random.seed(123)\n else:\n # every iter has different stimuli\n np.random.seed(task_info['seed'])\n\n # TimedArray stim\n if not task_info['sim']['online_stim']:\n # simulation params\n nn = int(task_info['sen']['N_E'] * task_info['sen']['sub']) # no. 
of neurons in sub-pop1\n stim_dt = task_info['sim']['stim_dt']\n runtime = unitless(task_info['sim']['runtime'], stim_dt)\n stim_on = unitless(task_info['sim']['stim_on'], stim_dt)\n stim_off = unitless(task_info['sim']['stim_off'], stim_dt)\n flip_stim = task_info['sim']['ramp_stim']\n stim_time = get_this_time(task_info, runtime, include_settle_time=True)\n tps = stim_off - stim_on # total stim points\n\n # stimulus namespace\n paramstim = params.get_stim_params(task_info)\n tau = unitless(paramstim['tau_stim'], stim_dt) # OU time constant\n c = paramstim['c']\n I0 = paramstim['I0']\n I0_wimmer = paramstim['I0_wimmer']\n mu1 = paramstim['mu1']\n mu2 = paramstim['mu2']\n if task_info['sim']['ramp_stim']:\n # smooth the stim onset with a positive exponential decay\n tau_ramp = 20e-3 / unitless(stim_dt, second, as_int=False)\n mu1 *= (1 - np.exp(-np.arange(tps) / tau_ramp))\n mu2 *= (1 - np.exp(-np.arange(tps) / tau_ramp))\n mu1 = mu1[None, :]\n mu2 = mu2[None, :]\n sigma_stim = paramstim['sigma_stim']\n sigma_ind = paramstim['sigma_ind']\n\n # common and private part\n z1 = np.tile(get_OUstim(tps, tau, flip_stim), (nn, 1))\n z2 = np.tile(get_OUstim(tps, tau, flip_stim), (nn, 1))\n np.random.seed(np.random.randint(10000))\n zk1 = get_OUstim(tps * nn, tau, flip_stim).reshape(nn, tps)\n zk2 = get_OUstim(tps * nn, tau, flip_stim).reshape(nn, tps)\n\n # stim2TimedArray with zero padding if necessary\n i1 = I0 + I0_wimmer * (c * mu1 + sigma_stim * z1 + sigma_ind * zk1)\n i2 = I0 + I0_wimmer * (c * mu2 + sigma_stim * z2 + sigma_ind * zk2)\n stim1 = i1.T.astype(np.float32)\n stim2 = i2.T.astype(np.float32)\n i1t = np.concatenate((np.zeros((stim_on, nn)), stim1,\n np.zeros((runtime - stim_off, nn))), axis=0).astype(np.float32)\n i2t = np.concatenate((np.zeros((stim_on, nn)), stim2,\n np.zeros((runtime - stim_off, nn))), axis=0).astype(np.float32)\n Irec = TimedArray(np.concatenate((i1t, i2t), axis=1)*amp, dt=stim_dt)\n\n if arrays:\n stim1 = i1t.T.astype(np.float32)\n stim2 = i2t.T.astype(np.float32)\n stim_fluc = np.hstack((np.zeros(stim_on), z1[0] - z2[0], np.zeros(runtime-stim_off)))\n return Irec, stim1, stim2, stim_time, stim_fluc\n\n return Irec", "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "def generate_timeseries(F=F, H=H, stop=2000, x0=np.array([-0.72, -0.64]),\n R_v=np.eye(2)*0, R_n=np.eye(2)*0.001):\n dim = 2 # Number of dimensions for the system\n U, Y = [], []\n\n x = x0\n for k in range(stop):\n U.append(u(k, dim))\n x = F(x, U[-1]) + np.random.multivariate_normal(np.zeros(dim), R_v)\n Y.append(H(x) + np.random.multivariate_normal(np.zeros(dim), R_n))\n\n return U, Y, R_v, R_n", "def setUp(self):\n shape = RNG.integers(5, 50)\n periods = self.periods = RNG.normal() * 3\n freq = periods / shape\n amp = RNG.normal()\n offset = RNG.normal()\n phase = (RNG.normal() - 1 / 2) * 5 / 3 * np.pi\n p_gt = self.p_gt = (amp, freq, phase, offset)\n x = self.x = np.arange(shape)\n self.data = sine(x, *p_gt)", "def waveform_2_stft(waveform, 
frame_length=512, frame_step=128, n_mel_bins=None,\n mel_lower_hertz_edge=0.0, mel_upper_hertz_edge=8000.0):\n\n if len(waveform.shape) == 1:\n waveform = tf.expand_dims(waveform, 0)\n\n stft = tf.signal.stft(\n waveform, frame_length=frame_length, frame_step=frame_step,\n pad_end=True, window_fn=WINDOW_FN\n )\n\n # Truncate the nyquist frequency, commonly done in other papers,\n # also makes computation easier.\n real = tf.math.real(stft)[:, :, 0:-1]\n img = tf.math.imag(stft)[:, :, 0:-1]\n\n if n_mel_bins:\n real = _linear_to_mel_scale(\n real, n_mel_bins, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n img = _linear_to_mel_scale(\n img, n_mel_bins, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n\n return tf.concat([tf.expand_dims(real, 3),\n tf.expand_dims(img, 3)], axis=-1)", "def source_embedding_fairseq(self):\r\n return tf.get_variable(\r\n name=\"W\",\r\n shape=[self.params[\"feature.dim\"], self.params[\"embedding.dim\"]],\r\n initializer=tf.random_normal_initializer(\r\n mean=0.0,\r\n stddev=0.1))", "def build_inputs(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n latent_dim,\n vocab_sizes,\n noise=False,\n mask=False,\n):\n latlon_input, latlon_embed = build_inputs_latlon(timesteps, latlon_dense_units)\n inputs = [latlon_input]\n embeddings = [latlon_embed]\n for key, val in vocab_sizes.items():\n cat_input, cat_embed = build_inputs_cat(timesteps, val, key)\n inputs.append(cat_input)\n embeddings.append(cat_embed)\n concat_input = layers.Concatenate(axis=2)(embeddings)\n unstacked = layers.Lambda(lambda x: tf.unstack(x, axis=1))(concat_input)\n d = layers.Dense(\n units=concat_dense_units,\n activation=\"relu\",\n kernel_initializer=initializers.he_uniform(seed=1),\n name=\"emb_trajpoint\",\n )\n if noise:\n noise_input = layers.Input(shape=(latent_dim,), name=\"input_noise\")\n inputs.append(noise_input)\n dense_outputs = [d(layers.Concatenate(axis=1)([x, noise_input])) for x in unstacked]\n else:\n dense_outputs = [d(x) for x in unstacked]\n if mask:\n inputs.append(layers.Input(shape=(timesteps, 1), name=\"input_mask\"))\n emb_traj = layers.Lambda(lambda x: tf.stack(x, axis=1))(dense_outputs)\n return (inputs, emb_traj)", "def embed(T, Te=2):\n N = T.shape[0] + 2\n\n # Create a matrix with NxN of zeros \n # Append the target matrix into the center of the zeros matrix\n Tfull = np.zeros((N,N))\n Tfull[1:-1, 1:-1] = T\n return Tfull", "def make_ith_sft(self, i):\n self.run_makefakedata_v4(\n self.mid_times[i],\n self.lineFreqs[i],\n self.linePhis[i],\n self.lineh0s[i],\n self.tmp_outdir,\n )", "def time_series(t, f=0.02):\n T = t.size\n # Seasonal component and time-varying trend component\n ys = np.sin(2 * np.pi * f * t) * 0.6 + np.sin(1 / 5 * 2 * np.pi * f * t) * 0.2\n # Amplitude modulation component\n amp_mod = 0.5 * np.sin(1 / 6 * 2 * np.pi * f * t) + 0.8\n ys *= amp_mod\n ys = np.reshape(ys, (T,1))\n return ys", "def generate_stimuli(num_trials=10, stim_dur=0.08, fs=24414., rms=0.01,\n ramp_noise=0.03, ramp_tone=0.06,\n output_dir=None, save_as='mat', rand_seed=0):\n if rand_seed is None:\n rng = np.random.RandomState()\n else:\n rng = np.random.RandomState(rand_seed)\n\n # check input arguments\n if save_as not in ['dict', 'wav', 'mat']:\n raise ValueError('\"save_as\" must be \"dict\", \"wav\", or \"mat\"')\n\n if fs is None:\n fs = get_tdt_rates()['25k']\n\n # General params:\n n = int(stim_dur * fs) # total number of samples\n t = np.linspace(0, stim_dur, n, endpoint=False) # time index for ploting\n\n#### make tone 
complex#########################################################\n\n tonecomp = np.zeros(24414. * stim_dur, float)\n fund = 250.0 # fundamental frequency\n for x in xrange(1, 5):\n freq = fund*x\n tonecomp = tonecomp + np.sin(freq * 2 * np.pi * np.arange\n (int(fs * stim_dur)) / float(fs))\n # windowing and onset/offset\n finalstim_tc = window_edges(tonecomp, fs, ramp_tone, -1, 'hamming')\n\n return finalstim_tc\n\n##### make noise burst#########################################################\n\n # add 50 points extra\n nb = np.random.normal(0, 1.0, int(fs * stim_dur) + 50)\n\n ### highpass cut-off freq of 1500Hz using 100th order Hamming ###\n b = sig.firwin(101, 1500. / (fs / 2), pass_zero=False) # False - highpass\n # nyq_rate = fs / 2\n # have to add '1' order\n filtered_stim = sig.lfilter(b, 1.0, nb)\n\n ### cut off extra 50 points from noiseburst ###\n filtered_stim = filtered_stim[50:]\n # windowing and onset/offset\n nb_ramped = window_edges(nb[50:], fs, ramp_noise, -1, 'hamming')\n finalstim_nb = np.multiply(nb_ramped, filtered_stim)\n\n return finalstim_nb", "def create_seq2seq_model_with_inital_state(timesteps, embedding_lenght, inital_state_vector):\n sequence_input = Input((timesteps, embedding_lenght), name=\"sequence_input\")\n initial_state = Input((inital_state_vector,), name=\"state_input\")\n\n lstm_out, state_h, state_c = LSTM(inital_state_vector, activation='relu', return_sequences=True,\n return_state=True, name='lstm_1')(sequence_input,\n initial_state=[initial_state, initial_state])\n runs_output = TimeDistributed(Dense(1, name='ts_individual_output'))(lstm_out)\n\n runs_output = Flatten(name='individual_output')(runs_output)\n total_runs_output = Dense(1, name='total_output')(state_h)\n\n runs_model = Model(inputs=[sequence_input, initial_state],\n outputs=[runs_output, total_runs_output])\n\n return runs_model", "def trajectory_simulator(close_loop_dynamics, initial_states, dt, length, noise_std):\n initial_states = np.atleast_2d(initial_states)\n n_envs = len(initial_states)\n ddim = len(initial_states[0])\n data = np.zeros((n_envs, ddim, length))\n data[:, :, 0] = initial_states\n for t in range(1, length):\n data[:, :, t] = close_loop_dynamics(data[:, :, t-1]).detach().numpy()\n data = data + np.random.normal(0, noise_std, data.shape) # zero-mean Guassian noise\n return data", "def embedding(streamlines, no_of_points):\r\n return np.array([set_number_of_points(s, no_of_points).ravel() for s in streamlines])", "def time_encoder(dt: tf.Tensor, size: int, dtype: tf.DType) -> tf.Tensor:\n weight = tf.get_variable(\n \"weight\",\n (size, ),\n dtype=dt.dtype,\n initializer=tf.random_normal_initializer(stddev=0.1),\n )\n bias = tf.get_variable(\"bias\", (size, ),\n dtype=dt.dtype,\n initializer=tf.zeros_initializer())\n cos = cos_fp16 if dtype == tf.float16 else tf.cos\n return cos(dt[..., tf.newaxis] * weight + bias)", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n 
[ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def embed(x, phase):\r\n\r\n is_train = True if phase == 'train' else False\r\n\r\n # Input embedding: convert input vector to dimension of hp.hidden_units.\r\n embs = input_embedding(x, num_units=hp.hidden_units, embed_type=hp.embed_type)\r\n print('Size after input embedding: ', embs.get_shape())\r\n\r\n # Positional Encoding.\r\n embs += embedding(tf.tile(tf.expand_dims(tf.range(tf.shape(x)[1]), 0), [tf.shape(x)[0], 1]),\r\n vocab_size=hp.win_len, num_units=hp.hidden_units,\r\n zero_pad=False, scale=False, scope=\"enc_pe\")\r\n print(\"Size after positional encoding: \", embs.get_shape())\r\n\r\n # Attention blocks.\r\n for i in range(hp.num_blocks):\r\n with tf.variable_scope(\"num_blocks_{}\".format(i)):\r\n # Multi-head Attention\r\n embs = multihead_attention(queries=embs, keys=embs, num_units=hp.hidden_units,\r\n num_heads=hp.num_heads, dropout_rate=hp.dropout_rate,\r\n is_training=is_train, causality=False)\r\n\r\n # Feed Forward\r\n embs = feedforward(embs, num_units=[2 * hp.hidden_units, hp.hidden_units])\r\n print(\"Size after multi-head_attention: \", embs.get_shape())\r\n\r\n # Temporal pooling by averaging on the time dimension.\r\n embs = tf.reduce_mean(embs, axis=1)\r\n\r\n return embs", "def fake_data(sample_rate=512,psd_segment_length=60,nsegs=16):\n epoch = 1153742417.0\n ts_data = numpy.random.normal(0,1,sample_rate*psd_segment_length*nsegs)\n ts_data = types.TimeSeries(ts_data,delta_t=1.0/sample_rate,epoch=epoch)\n return ts_data", "def create_sequential_model(timesteps, embedding_lenght):\n sequence_input = Input((timesteps, embedding_lenght), name=\"sequence_input\")\n\n lstm_out = LSTM(100, activation='relu', return_sequences=False,\n return_state=False, name='lstm_1')(sequence_input)\n # lstm_out = LSTM(40,activation='relu',return_sequences=False,\n # return_state=False,name='lstm_2')(lstm_out)\n # lstm_out = Flatten()(lstm_out)\n\n runs_output = Dense(10, name='dense_1', activation='relu')(lstm_out)\n # runs_output = Dense(5,name='dense_2',activation='relu')(runs_output)\n runs_output = Dense(1, name='final_output')(runs_output)\n\n runs_model = Model(inputs=[sequence_input],\n outputs=runs_output)\n\n return runs_model", "def embedding(signal, delay=2):\n\n fig = plt.figure(figsize=(20, 20))\n\n if delay == 2:\n emb = np.zeros((signal.shape[0] - 1, 2))\n\n emb[:, 0] = signal[:-1]\n emb[:, 1] = signal[1:]\n ax = fig.add_subplot(111)\n plt.plot(emb[:,0], emb[:,1], c='r')\n if delay == 3:\n emb = np.zeros((signal.shape[0] - 2, 3))\n\n emb[:, 0] = signal[:-2]\n emb[:, 1] = signal[1:-1]\n emb[:, 2] = signal[2:]\n\n ax = fig.add_subplot(111, projection='3d')\n plt.plot(emb[:,0], emb[:,1], emb[:,2], c='r')\n\n\n plt.show()\n plt.close()", "def generate_generator(latent_shape: Tuple[int], lstm_units:int = 64, activation_function:str=\"tanh\") -> 
tf.keras.Model:\n\n input = tf.keras.layers.Input(shape=latent_shape, name=\"generator_input\")\n decoded = tf.keras.layers.Flatten()(input)\n\n #first layer should be half the size of the sequence\n half_seq_length = seq_len // 2\n decoded = tf.keras.layers.Dense(units=half_seq_length)(decoded)\n decoded = tf.keras.layers.Reshape(target_shape=(half_seq_length, 1))(decoded) \n\n # generate a new timeseries using two ltsm layers that have 64 hidden units with upsampling between them\n decoder = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm_units, return_sequences=True, dropout=0.2 , recurrent_dropout=0.2), merge_mode=\"concat\")(decoded)\n decoder = tf.keras.layers.UpSampling1D(size=2)(decoder)\n decoder = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm_units, return_sequences=True, dropout=0.2 , recurrent_dropout=0.2), merge_mode=\"concat\")(decoder)\n\n #rebuild the original shape of the time series for all signals\n decoder = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(ts_input_shape[1]//n_iterations_critic))(decoder)\n decoder = tf.keras.layers.Activation(activation_function)(decoder)\n return tf.keras.Model(inputs=input, outputs=decoder, name=\"generator\")", "def build_word_embeddings(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n elif self.mode == \"test\":\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n else:\n word_emb = tf.get_variable(\n name=\"word_embedding\",\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer)\n\n encode_emb1 = tf.nn.embedding_lookup(word_emb, self.encode_ids1)\n encode_emb2 = tf.nn.embedding_lookup(word_emb, self.encode_ids2)\n\n\n self.encode_emb1 = encode_emb1\n self.encode_emb2 = encode_emb2", "def create_sinusoidal_movement(\n duration_sec, sample_period, x_velocity, y_start_offset, start_time):\n time = np.arange(0, duration_sec, sample_period) + start_time\n n_steps = len(time)\n x = np.linspace(0, x_velocity * duration_sec, n_steps)\n\n spatial_freq = 1.0 / 5.0 # 1 sinus per 5m\n omega = 2.0 * np.pi * spatial_freq\n y = np.sin(omega * x)\n y += y_start_offset\n\n dydx = omega * np.cos(omega * x)\n yaw = np.arctan2(dydx, np.ones_like(dydx))\n\n pqs = []\n for i in range(n_steps):\n R = pr.active_matrix_from_extrinsic_euler_zyx([yaw[i], 0, 0])\n T = pt.transform_from(R, [x[i], y[i], 0])\n pq = pt.pq_from_transform(T)\n pqs.append(pq)\n\n return time, np.array(pqs)", "def generate_random_MT():\n # Generate 6 indepdendent normal deviates:\n six_MT_unnormalised = np.array([np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0)], dtype=float)\n # Normallise sample onto unit 6-sphere:\n six_MT_normalised = six_MT_unnormalised/(np.sum(six_MT_unnormalised**2)**-0.5) # As in Muller (1959)\n # And normallise so that moment tensor magnitude = 1:\n 
six_MT_normalised = six_MT_normalised/((np.sum(six_MT_normalised**2))**0.5)\n # And set to correct dimensions (so matrix multiplication in forward model works correctly):\n six_MT_normalised = np.reshape(six_MT_normalised, (6, 1))\n return six_MT_normalised", "def time_series(self, length):\n maker = r.Recomposer(self._components, self.bias)\n return maker.time_series(length)", "def SNEmbedding(*args, **kwargs):\n return spectral_norm(nn.Embedding(*args, **kwargs))", "def make_timeseries_instances(timeseries, window_size):\n timeseries = np.asarray(timeseries)\n assert 0 < window_size < timeseries.shape[0]\n X = np.atleast_3d(np.array([timeseries[start:start + window_size] for start in range(0, timeseries.shape[0] - window_size)]))\n y = timeseries[window_size:]\n q = np.atleast_3d([timeseries[-window_size:]])\n return X, y, q", "def spectogram_2_waveform(spectogram, frame_length=512, frame_step=128,\n log_magnitude=True, instantaneous_frequency=True,\n n_mel_bins=None, mel_lower_hertz_edge=0.0,\n mel_upper_hertz_edge=8000.0):\n\n if len(spectogram.shape) == 3:\n spectogram = tf.expand_dims(spectogram, 0)\n\n magnitude = spectogram[:, :, :, 0]\n phase = spectogram[:, :, :, 1]\n\n if n_mel_bins:\n magnitude = _mel_to_linear_scale(\n magnitude, frame_length//2, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n phase = _mel_to_linear_scale(\n phase, frame_length//2, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n\n if log_magnitude:\n magnitude = tf.math.exp(magnitude) - _EPSILON\n\n if instantaneous_frequency:\n phase = tf.cumsum(phase, axis=-2)\n phase = (phase + np.pi) % (2 * np.pi) - np.pi\n\n # Set the nyquist frequency to zero (the band we earlier removed).\n # This is also commonly done in these other papers.\n magnitude = tf.pad(magnitude, [[0, 0], [0, 0], [0, 1]], constant_values=0)\n phase = tf.pad(phase, [[0, 0], [0, 0], [0, 1]], constant_values=0)\n\n real = magnitude * tf.math.cos(phase)\n img = magnitude * tf.math.sin(phase)\n\n stft = tf.complex(real, img)\n inv_window_fn = tf.signal.inverse_stft_window_fn(frame_step, forward_window_fn=WINDOW_FN)\n waveform = tf.signal.inverse_stft(\n stft, frame_length=frame_length,\n frame_step=frame_step, window_fn=inv_window_fn\n )\n return waveform", "def perform_stft(x,w,q,n):\n\n #bound = getoptions(options, 'bound', 'per');\n #transform_type = getoptions(options, 'transform_type', 'fourier');\n #normalization = getoptions(options, 'normalization', 'tightframe');\n #window_type = getoptions(options, 'window_type', 'sin');\n #eta = getoptions(options, 'eta', 1);\n \n if np.ndim(x) == 1:\n dir = 1\n else:\n dir = -1\n \n # perform sampling\n X = np.arange(1,n+2,q)\n\n p = len(X)\n eta = 1\n \n if w%2 == 1:\n w = np.ceil((w-1)/2)*2+1\n w1 = (w-1)//2\n dX = np.arange(-w1,w1+1)\n else:\n dX = np.arange(-w//2+1,w//2+1)\n \n X1 = np.tile(X,(w,1)) + np.transpose(np.tile(dX, (p,1)))\n #periodic boundary conditions\n X1 = ((X1-1)%n)+1;\n \n I = X1 -1\n \n # build a sin weight function\n W = .5 *(1 - np.cos(2*np.pi*np.arange(0,w)/(w-1)))\n \n #renormalize the windows\n weight = np.zeros(n)\n \n for i in range(p):\n weight[I[:,i]] = weight[I[:,i]] + W**2\n \n weight = np.sqrt(weight)\n Weight = np.transpose(np.tile(W, (p,1)))\n \n for i in range(p):\n Weight[:,i] = Weight[:,i]/weight[I[:,i]]\n \n #compute the transform\n if dir == 1:\n y = np.zeros([eta*w,p])\n if w%2 == 1:\n m = (eta*w+1)//2\n w1 = (w-1)//2\n sel = np.arange(m-w1,m+w1+1) - 1\n else:\n m = (eta*w)//2+1 \n w1 = w//2\n sel = np.arange(m-w1,m+w1) - 1\n y[sel,:] = x[I]*Weight\n\n 
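    # y now holds the window-weighted frames, one column per frame position, ready for the forward transform applied below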
#perform the transform\n y = my_transform(y,+1)\n\n else:\n x = my_transform(x,-1)\n x = np.real(x*Weight)\n y = np.zeros(n)\n for i in range(p):\n y[I[:,i]] = y[I[:,i]] + x[:,i]\n\n return y", "def generate_synth_data(n):", "def create_trainset(\n self,\n s=\"2016-01-01\",\n e=\"2018-12-31\",\n freq=\"D\",\n ) -> DateTensors:\n self.date_series = DatasetDateSeries(\n start=s, end=e, wsz=self.wsz, to_onehot=True\n ) # wsz same as W\n\n # ti window data to tensor\n ti = Tsr(self.date_series.ti_win)\n\n # tc window data to tensor\n N, W, Dtc = len(ti), self.date_series.wsz, 3\n tc = torch.randint(0, 2, (1, 1, Dtc)).repeat(N, W, 1) # shape: (N, W, Dtc)\n\n # kn window data to tensor\n kn = Tsr(self.date_series.kn_win)\n\n # create target data as `tg` (target)\n tg = self.dct_curve[self.model_type](ti).repeat(1, 1, self.Dout)\n\n ti, tc, kn, tg = self.to_device(ti, tc, kn, tg)\n trainset = DateTensors(\n ti=ti, tc=tc, kn=kn, tg=tg, device=self.device\n ) # ti/tc/kn.shape: (N, W, Dout), tg.shape = (N, 1, Dout)\n self.trainset = trainset\n return trainset", "def gen_TT(Tstart=1,Tend=1000,Tstep=1):\n Tsteps = int((Tend-Tstart)//Tstep)\n TT = np.zeros(Tsteps)\n for i in range(0,Tsteps):\n TT[i] = Tstart + i*Tstep\n \n return TT", "def embed_data(\n self,\n data: Dict[str, tf.SparseTensor]\n ) -> Tuple[tf.Tensor, tf.Tensor]:\n\n batch_shape = tf.shape(data[\"t\"])[:-1]\n flat_data = nest.map_structure(batches.flatten_batch, data)\n flat_data = nest.map_structure(batches.sparse_fill_empty_rows, flat_data)\n\n context_embeddings = (\n self.embedding.provide_embeddings_to_forward_fn(\n flat_data, feature_types=self._config.context_features))\n context_embeddings = nest.map_structure(\n batches.get_unflatten_batch_fn(batch_shape), context_embeddings)\n\n sequential_embeddings = (\n self.embedding.provide_embeddings_to_forward_fn(\n flat_data, feature_types=self._config.sequential_features))\n sequential_embeddings = nest.map_structure(\n batches.get_unflatten_batch_fn(batch_shape), sequential_embeddings)\n\n dt = tf.divide(tf.cast(data[\"dt\"], dtype=tf.float32), 5400.)\n t = tf.divide(tf.cast(data[\"t\"], dtype=tf.float32), 5400.)\n dt_log = tf.log(dt + 1.)\n\n embedding_dict = sequential_embeddings.copy()\n embedding_dict.update(context_embeddings)\n embedding_dict[\"dt_s\"] = tf.matmul(dt_log, self.w_dt)\n combined_embedding = self._combine_embeddings_for_input(embedding_dict)\n inputs = combined_embedding\n if self._config.get(\"apply_bias\", False):\n inputs = inputs + tf.get_variable(\n \"_\".join([self._config.embedding_type, \"final_bias\"]),\n shape=[self.get_total_embedding_size()],\n initializer=tf.zeros_initializer)\n time_vect = t\n\n return inputs, time_vect", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n 
different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def generate_ensemble_time_series(dsys, n_traj, len_traj):\n\n for i in range(n_traj):\n # Training initial condition.\n x0_train = normal(loc=0.0, scale=1.0, size=(dsys.A.shape[1]))\n\n # Run simulation to generate dataset.\n t, _, x = dlsim(dsys, np.zeros((len_traj, dsys.inputs)), x0=x0_train)\n\n # Store the data.\n if i == 0:\n X, Y = x.T[:, :-1], x.T[:, 1:]\n else:\n X, Y = np.c_[X, x.T[:, :-1]], np.c_[Y, x.T[:, 1:]]\n return X, Y", "def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_", "def Sinusoidal_Embeddings(positions, d_feature):\n inv_freq = 1 / (10000**(jnp.arange(0.0, d_feature, 2.0) / d_feature))\n sinusoid_freq = jnp.einsum('i,j->ij', positions, inv_freq)\n pos_emb = jnp.concatenate(\n [jnp.sin(sinusoid_freq), jnp.cos(sinusoid_freq)], axis=1)\n return pos_emb", "def _decoding_step_time_signal(self, target_embeds, decode_loop_step):\n # TODO(hongkuny): migrate to keras bert and design a module to handle this.\n output = target_embeds\n if self.embedding_postprocessor.use_position_embeddings:\n position_embeddings = tf.gather(\n self.embedding_postprocessor.position_embeddings, [decode_loop_step])\n # Broadcasts to all sequences inside a batch.\n output += position_embeddings\n\n output = self.embedding_postprocessor.output_layer_norm(output)\n output = self.embedding_postprocessor.output_dropout(output)\n return output", "def __init__(self, lstm_step=80, input_d=300, vocab_size=2196018, embedding=None):\n self.raw_premise = tf.placeholder(shape=[None, lstm_step], dtype=tf.int32, name='premise')\n self.premise_length = tf.placeholder(shape=[None], dtype=tf.int32, name='premise_length')\n\n self.raw_hypothesis = tf.placeholder(shape=[None, lstm_step], dtype=tf.int32, name='hypothesis')\n self.hypothesis_length = tf.placeholder(shape=[None], dtype=tf.int32, name='hypothesis_length')\n\n self.label = tf.placeholder(shape=[None], dtype=tf.int32)\n # Those operations take too many memory\n # Use cpu for those operations (deprecated when using truncate embedding)\n if embedding is not None:\n self.input_embedding = tf.placeholder(dtype=tf.float32, shape=embedding.shape, name='word_embedding')\n self.embedding = tf.Variable(tf.zeros(embedding.shape, dtype=tf.float32))\n else:\n \"\"\"\n If embedding is not provided, then use random number as embedding\n \"\"\"\n self.embedding = tf.Variable(tf.random_uniform([vocab_size, input_d], minval=-0.05, maxval=0.05))\n \"\"\"\n This is the embedding operation. 
It will be invoked by loading embedding function in the actual model\n \"\"\"\n self.load_embedding_op = self.embedding.assign(self.input_embedding)\n\n self.premise = tf.nn.embedding_lookup(self.embedding, self.raw_premise)\n self.hypothesis = tf.nn.embedding_lookup(self.embedding, self.raw_hypothesis)", "def create_beams(x):\n return tf.repeat(x, self.num_beams, axis=0)", "def make_frame(t):\r\n while world['t'] < hours_per_second*t:\r\n update(world)\r\n return world_to_npimage(world)", "def generate_initial_embs(emb_type):\n def _get_emb_avg(g, lang):\n \"\"\"Compute the embedding of g as the average of its word embeddings\n :param g: the input genre\n :param lang: language\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _get_emb_wavg(g, lang, a=0.001):\n \"\"\"Compute the embeddings of g with a sentence embedding algorithm (average weighted by the word estimated frequencies)\n :param g: the input genre\n :param lang: language\n :param a: a model hyper-parameter (see Arora et al. in the paper)\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += a / (a + word_freqs[lang][w]) * models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _remove_pc(df_embs, npc=1):\n \"\"\"Remove the pc (see Arora at el. in the paper)\n :param df_embs: the input embeddings\n :return: the normalized embeddings\n \"\"\"\n pc = _compute_pc(df_embs, npc)\n if npc == 1:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()) * pc\n else:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()).dot(pc)\n return df_embs_out\n\n def _compute_pc(df_embs, npc=1):\n \"\"\"Compute the pc (see Arora at el. 
in the paper)\n :param df_embs: the input embeddings\n :return: the principal component\n \"\"\"\n svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)\n svd.fit(df_embs)\n return svd.components_\n\n embs = {}\n known = {}\n for g in G.nodes:\n lang = g[:2]\n norm_g = TagManager.normalize_tag_wtokenization(g, tries[lang], prefixed=True)\n if emb_type == 'avg':\n embs[g], known[g] = _get_emb_avg(norm_g, lang)\n else:\n embs[g], known[g] = _get_emb_wavg(norm_g, lang)\n\n embs = pd.DataFrame(embs).T # the embeddings are columns\n if emb_type == 'sif': # the algorithm imposes a normalization\n norm_embs = _remove_pc(embs.to_numpy())\n embs = pd.DataFrame(norm_embs, columns=embs.columns, index=embs.index)\n return embs, known", "def make_model(n_dimensions, seed):\n with spa.SPA(seed=seed) as model:\n # Create the state holding element\n model.state = spa.State(dimensions=n_dimensions,\n feedback=1.0, feedback_synapse=0.01)\n\n # Create the state transitions\n actions = spa.Actions(*(\"dot(state, {}) --> state = {}\".format(x, y) for\n (x, y) in zip(\"ABCDE\", \"BCDEA\")))\n model.bg = spa.BasalGanglia(actions=actions)\n model.thal = spa.Thalamus(model.bg)\n\n # Create the input for the initial state\n model.input = spa.Input(state=lambda t: 'A' if t < 0.05 else '0')\n\n return model", "def create_ws_sticker_sequences() -> None:\n with connection.cursor() as cursor:\n start_year = 2020\n for i in range(25):\n year = start_year + i\n sql = \"CREATE SEQUENCE IF NOT EXISTS ws_stickers_{}_{} START 1;\".format(\n year, year + 1\n )\n cursor.execute(sql)", "def build_lstm11(embeddings, shape, settings):\n model = Sequential()\n model.add(\n Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=shape['max_length'],\n trainable=False,\n weights=[embeddings],\n mask_zero=False\n )\n )\n model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9a'))\n model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9a'))\n # model.add(GlobalMaxPool1D())\n # model.add(BatchNormalization())\n # model.add(Dropout(settings['dropout'] / 2.0))\n\n # model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9b'))\n model.add(Bidirectional(LSTM(shape['n_hidden'] // 2, return_sequences=True,\n recurrent_dropout=settings['dropout'],\n dropout=settings['dropout']), name='bidi9c'))\n model.add(GlobalMaxPool1D(name='mp9'))\n model.add(BatchNormalization(name='bn9'))\n model.add(Dropout(settings['dropout'] / 2.0, name='drop9b'))\n\n model.add(Dense(shape['n_class'], activation='sigmoid', name='den9b'))\n xprint('build_lstm9: embeddings=%s shape=%s' % (dim(embeddings), shape))\n return model", "def MakeLongTime(t, N):\n dt=(t.max()-t.min())/(max(shape(t))-1)\n tout=arange(0,N,dtype='f')\n tout=tout*dt\n return tout", "def build_timeseries(mat, TIME_STEPS, y_col_index = None):\n dim_0 = mat.shape[0] - TIME_STEPS\n D = mat.shape[1]\n x = np.zeros((dim_0, TIME_STEPS, D))\n if y_col_index:\n y = np.zeros((dim_0, ))\n else:\n y = np.zeros((dim_0, D))\n \n for i in range(dim_0):\n x[i] = mat[i:TIME_STEPS+i]\n if y_col_index:\n y[i] = mat[TIME_STEPS + i, y_col_index]\n else:\n y[i] = mat[TIME_STEPS + i, :]\n\n print(\"length of time-series i/o\",x.shape,y.shape)\n return x, y", "def _add_seq2seq(self):\n mode = 
self._mode\n vsize = self._vocab.size() # size of the vocabulary\n\n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = tf.random_uniform_initializer(-config.rand_unif_init_mag, config.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=config.trunc_norm_init_std)\n\n # Add embedding matrix (shared by the encoder and decoder inputs)\n with tf.variable_scope('embedding'):\n embedding = tf.get_variable('embedding', [vsize, config.emb_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n if mode==\"train\": self._add_emb_vis(embedding) # add to tensorboard\n emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch) # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_dec_inputs = tf.nn.embedding_lookup(embedding, self._dec_batch) # tensor with shape (batch_size, max_dec_steps, emb_size)\n #emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)] # list length max_dec_steps containing shape (batch_size, emb_size)\n\n # Add the encoder.\n enc_fw_states, enc_bw_states, enc_fw, enc_bw = self._add_input_encoder(emb_enc_inputs, self._enc_lens)\n\n print(\"Encoder FW\", enc_fw_states.shape)\n print(\"Encoder BW\", enc_bw_states.shape)\n raise Exception(\"testing mode\")\n\n #reshape encoder states from [batch_size, input_size, hidden_dim] to [batch_size, input_size * hidden_dim]\n enc_fw_states = tf.reshape(enc_fw_states, [config.batch_size, config.hidden_dim * tf.shape(enc_fw_states)[1]])\n enc_bw_states = tf.reshape(enc_bw_states, [config.batch_size, config.hidden_dim * tf.shape(enc_bw_states)[1]])\n\n\n # python run.py --mode=decode --data_path=data/chunked/train_1/train_1_*.bin --vocab_path=data/vocab_1 --exp_name=full1isto1\n\n # Add the decoder.\n dec_fw_states, dec_bw_states = self._add_input_decoder(emb_dec_inputs, self._dec_lens, enc_fw, enc_bw)\n\n #reshape decoder states from [batch_size, input_size, hidden_dim] to [batch_size, input_size * hidden_dim]\n dec_fw_states = tf.reshape(dec_fw_states, [config.batch_size, config.hidden_dim * tf.shape(dec_fw_states)[1]])\n dec_bw_states = tf.reshape(dec_bw_states, [config.batch_size, config.hidden_dim * tf.shape(dec_bw_states)[1]])\n #print(\"Decoder FW\", dec_fw_states.shape)\n #print(\"Decoder BW\", dec_bw_states.shape)\n\n\n #enc_c = tf.concat(axis=1, values=[enc_fw.c, enc_bw.c])\n #enc_h = tf.concat(axis=1, values=[enc_fw.h, enc_bw.h])\n #dec_c = tf.concat(axis=1, values=[dec_fw.c, dec_bw.c])\n #dec_h = tf.concat(axis=1, values=[dec_fw.h, dec_bw.h])\n\n final_encoding = tf.concat(axis=1, values=[enc_fw_states, enc_bw_states, dec_fw_states, dec_bw_states])\n #print(\"Final encoding\", final_encoding.shape)\n #raise Exception(\"Test\")\n dims_final_enc = tf.shape(final_encoding)\n\n \"\"\"\n #convo_input = tf.concat(axis=1, values=[enc_c, enc_h, dec_c, dec_h])\n input_layer = tf.reshape(final_encoding, [config.batch_size, dims_final_enc[1], 1])\n print(\"Convolution input shape\", input_layer.shape)\n\n conv1 = tf.layers.conv1d(\n inputs=input_layer,\n filters=8,\n kernel_size=5,\n padding=\"same\",\n activation=tf.nn.relu)\n conv1 = tf.layers.batch_normalization(conv1)\n print(\"Convolution1 output shape\", conv1.shape)\n\n pool1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2, strides=2)\n print(\"Pool1 output shape\", pool1.shape)\n\n conv2 = tf.layers.conv1d(\n inputs=pool1,\n filters=16,\n kernel_size=5,\n padding=\"same\",\n activation=tf.nn.relu)\n\n\n conv2 = 
tf.layers.batch_normalization(conv2)\n print(\"Convolution2 output shape\", conv2.shape)\n\n pool2 = tf.layers.max_pooling1d(inputs=conv2, pool_size=2, strides=2)\n print(\"Pool2 output shape\", pool2.shape)\n\n dims_pool2 = tf.shape(pool2)\n\n pool2_flat = tf.reshape(pool2, [config.batch_size, dims_pool2[1] * 16])\n print(\"Pool2_flat output shape\", pool2_flat.shape)\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n \"\"\"\n #raise Exception(\"testing mode\")\n\n #dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode==\"train\")\n #print(\"Dense output shape\", dense.shape)\n\n #raise Exception(\"Just testing\")\n # Add the output projection to obtain the vocabulary distribution\n with tf.variable_scope('output_projection'):\n w = tf.get_variable('w', [dims_final_enc[1], 2], dtype=tf.float32, initializer=self.trunc_norm_init)\n bias_output = tf.get_variable('bias_output', [2], dtype=tf.float32, initializer=self.trunc_norm_init)\n #concatenate abstract and article outputs [batch_size, hidden_dim*4]\n\n\n #get classification output [batch_size, 1] default on last axis\n self._logits = tf.matmul(final_encoding, w) + bias_output\n #self._logits = tf.layers.dense(final_encoding, 2, kernel_initializer=self.trunc_norm_init, bias_initializer=self.trunc_norm_init)\n #self._prob = tf.nn.softmax(logits, \"class_prob\")\n\n if mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'):\n #self._prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self._targets)\n #class_weights = tf.constant([0.1, 5.])\n self._loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._targets, logits=self._logits))\n #self._loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self._targets, logits=self._logits, pos_weight=class_weights))\n tf.summary.scalar('loss', self._loss)\n\n\n\n #if mode == \"decode\":", "def waveform_2_spectogram(waveform, frame_length=512, frame_step=128,\n log_magnitude=True, instantaneous_frequency=True,\n n_mel_bins=None, mel_lower_hertz_edge=0.0,\n mel_upper_hertz_edge=8000.0):\n\n if len(waveform.shape) == 1:\n waveform = tf.expand_dims(waveform, 0)\n\n stft = tf.signal.stft(\n waveform, frame_length=frame_length, frame_step=frame_step,\n pad_end=True, window_fn=WINDOW_FN\n )\n\n # Truncate the nyquist frequency, commonly done in other papers,\n # also makes computation easier.\n magnitude = tf.abs(stft)[:, :, 0:-1]\n phase = tf.math.angle(stft)[:, :, 0:-1]\n\n if n_mel_bins:\n magnitude = _linear_to_mel_scale(\n magnitude, n_mel_bins, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n phase = _linear_to_mel_scale(\n phase, n_mel_bins, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n\n if log_magnitude:\n magnitude = tf.math.log(magnitude + _EPSILON)\n\n if instantaneous_frequency:\n phase = np.unwrap(phase)\n phase = np.concatenate([np.expand_dims(phase[:, 0, :], axis=-2),\n np.diff(phase, axis=-2)], axis=-2).astype(np.float32)\n\n spectogram = tf.concat([tf.expand_dims(magnitude, 3),\n tf.expand_dims(phase, 3)], axis=-1)\n\n return spectogram", "def generate_lstm_input_sequence(\n input_tensor: Tensor,\n seq_len: int,\n window_shift_step_size: int\n):\n num_iterations = (seq_len // window_shift_step_size)\n num_vars = input_tensor.shape[1]\n tensor_list = []\n for i in range(num_iterations):\n # calculate how much the window has to be shifted\n window_shift = i * window_shift_step_size\n # shift the input tensor\n shifted_tensor = input_tensor[window_shift:, :]\n 
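        # dropping the first window_shift timesteps offsets the window grid, so this pass yields windows that start at a shifted position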
# evaluate new size\n total_time_steps = shifted_tensor.shape[0]\n # evalute the new sample size\n sample_size = total_time_steps // seq_len\n # crop samples that cannot be used (as not devidable by sample size)\n upper_bound = sample_size * seq_len\n # log values\n logger.debug('creating {} samples using data idx {} to {}'.format(\n str(sample_size),\n str(window_shift),\n str(upper_bound + window_shift)\n ))\n # subset shifted tensor to match sample size\n subset_tensor = shifted_tensor[0:upper_bound, :]\n # create input_samples\n input_samples = subset_tensor.view(sample_size, seq_len, num_vars)\n # add it to the list\n tensor_list.append(input_samples)\n\n return(torch.cat(tensor_list, dim=0))", "def Aut(A):\n return Embeddings(A,A)", "def create_tweet(model):\n return model.make_short_sentence(140, init_state=None)", "def cal_embedding(As, Vs):\n global RcdTime, RcdIndex\n global Lamda_Min\n RcdTime = []\n \n # data to return\n embeddings = []\n \n for ii in xrange(len(As)):\n RcdTime.append(np.zeros(10, dtype=float))\n RcdIndex = ii\n \n print \"Calculating graph number\", ii, \"...\"\n \n # calculate walvet spectrum\n start = time()\n Lamdas, Tfs = wavelet_spectrum_fast(As[ii], Vs[ii])\n RcdTime[RcdIndex][0] = time() - start\n \n # calculate indicators as embedding\n start = time()\n embeddings.extend(spectrum_indicators(Lamdas, Tfs))\n RcdTime[RcdIndex][1] = time() - start\n \n embeddings = np.array(embeddings)\n \n # save some result\n np.savetxt(\"saves/TimeRcds.csv\", np.array(RcdTime), delimiter=',', \n header=\"walvet spectrum, calculate indicators, WS - eigen decompose, WS - kernel and operator, WS - calculate Tf\")\n \n # return\n return embeddings, Lamdas, Tfs", "def forward(self, hidden_state=None, messages=None, tau=1.2):\n\n if messages is None:\n hidden_state = self.input_module(hidden_state)\n state, batch_size = self._init_state(\n hidden_state, type(self.rnn))\n\n # Init output\n if self.training:\n output = [\n torch.zeros(\n (batch_size, self.vocab_size),\n dtype=torch.float32,\n device=self.device,\n )\n ]\n output[0][:, self.sos_id] = 1.0\n else:\n output = [\n torch.full(\n (batch_size,),\n fill_value=self.sos_id,\n dtype=torch.int64,\n device=self.device,\n )\n ]\n\n # Keep track of sequence lengths\n initial_length = self.output_len + 1 # add the sos token\n seq_lengths = (\n torch.ones([batch_size], dtype=torch.int64, device=self.device)\n * initial_length\n )\n\n embeds = [] # keep track of the embedded sequence\n entropy = 0.0\n sentence_probability = torch.zeros(\n (batch_size, self.vocab_size), device=self.device\n )\n\n for i in range(self.output_len):\n if self.training:\n emb = torch.matmul(output[-1], self.embedding)\n else:\n emb = self.embedding[output[-1]]\n\n embeds.append(emb)\n state = self.rnn(emb, state)\n\n if type(self.rnn) is nn.LSTMCell:\n h, c = state\n else:\n h = state\n\n p = F.softmax(self.linear_out(h), dim=1)\n entropy += Categorical(p).entropy()\n\n if self.training:\n token = self.utils_helper.calculate_gumbel_softmax(p, tau, hard=True)\n else:\n sentence_probability += p.detach()\n if self.greedy:\n _, token = torch.max(p, -1)\n\n else:\n token = Categorical(p).sample()\n\n if batch_size == 1:\n token = token.unsqueeze(0)\n\n output.append(token)\n\n self._calculate_seq_len(\n seq_lengths, token, initial_length, seq_pos=i + 1\n )\n\n return (\n torch.stack(output, dim=1),\n seq_lengths,\n torch.mean(entropy) / self.output_len,\n torch.stack(embeds, dim=1),\n sentence_probability,\n )\n\n else:\n batch_size = messages.shape[0]\n\n 
emb = (\n torch.matmul(messages, self.embedding)\n if self.training\n else self.embedding[messages]\n )\n\n # initialize hidden\n h = torch.zeros([batch_size, self.hidden_size], device=self.device)\n if self.cell_type == \"lstm\":\n c = torch.zeros([batch_size, self.hidden_size], device=self.device)\n h = (h, c)\n\n # make sequence_length be first dim\n seq_iterator = emb.transpose(0, 1)\n for w in seq_iterator:\n h = self.rnn(w, h)\n\n if self.cell_type == \"lstm\":\n h = h[0] # keep only hidden state\n\n out = self.output_module(h)\n\n return out, emb", "def np_to_time_tensor_generator(self,windowSize):\n if np.ndim(self.arr) > 1:\n for ix,v in enumerate(self.arr):\n yield self._tensor_factory(v,windowSize,ix,self.seqTransformerList)\n else:\n yield self._tensor_factory(self.arr,windowSize,0,self.seqTransformerList)", "def create_sequential_model_with_inital_state(timesteps, embedding_lenght, inital_state_vector):\n sequence_input = Input((timesteps, embedding_lenght), name=\"sequence_input\")\n initial_state = Input((inital_state_vector,), name=\"state_input\")\n\n lstm_out = LSTM(inital_state_vector, activation='relu', return_sequences=False,\n return_state=False, name='lstm_1')(sequence_input, initial_state=[initial_state, initial_state])\n runs_output = Dense(1, name='final_output')(lstm_out)\n\n runs_model = Model(inputs=[sequence_input, initial_state],\n outputs=runs_output)\n\n return runs_model", "def specwv(fx,tstep=2**5,nfbins=2**10,nhs=2**8,nhwv=2**9-1,ngwv=2**3-1,df=1.0):\r\n \r\n #calculate stft\r\n pst,tlst,flst=stft(fx,nh=nhs,tstep=tstep,nfbins=nfbins,df=df)\r\n \r\n #calculate new time step so WVD and STFT will align\r\n ntstep=len(fx)/(len(tlst)*2.)\r\n \r\n #calculate spwvd\r\n pwv,twv,fwv=spwvd(fx,tstep=ntstep,nfbins=nfbins,df=df,nh=nhwv,ng=ngwv)\r\n \r\n #multiply the two together normalize\r\n tfarray=pst/pst.max()*pwv/pwv.max()\r\n \r\n return tfarray,tlst,flst", "def tts(model, text):\n\tif USE_CUDA:\n\t\tmodel = model.cuda()\n\t\n\t# NOTE: dropout in the decoder should be activated for generalization!\n\t# model.decoder.eval()\n\tmodel.encoder.eval()\n\tmodel.postnet.eval()\n\n\tsequence = np.array(text_to_sequence(text))\n\tsequence = Variable(torch.from_numpy(sequence)).unsqueeze(0)\n\tif USE_CUDA:\n\t\tsequence = sequence.cuda()\n\n\t# Greedy decoding\n\tmel_outputs, linear_outputs, gate_outputs, alignments = model(sequence)\n\n\tlinear_output = linear_outputs[0].cpu().data.numpy()\n\tspectrogram = audio._denormalize(linear_output)\n\talignment = alignments[0].cpu().data.numpy()\n\n\t# Predicted audio signal\n\twaveform = audio.inv_spectrogram(linear_output.T)\n\n\treturn waveform, alignment, spectrogram", "def build_bilstm(self, verbose=True):\r\n word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')\r\n inputs = [word_ids]\r\n\r\n if self._params.use_pretrain_embedding:\r\n if verbose: logging.info(\"initial word embedding with pretrained embeddings\")\r\n if self._params.word_embedding_dim == 100:\r\n glove_file = self._params.data_dir + '/glove.6B.100d.txt'\r\n elif self._params.word_embedding_dim == 300:\r\n glove_file = self._params.data_dir + '/glove.42B.300d.txt'\r\n else:\r\n logging.error(\"we only support glove embedding with dimension 100 or 300\")\r\n raise ValueError(\"unmatch word dimension, we only support glove embedding with dimension 100 or 300\")\r\n glove_embedding_index = load_glove(glove_file, self._params.word_embedding_dim)\r\n word_vocab = self.input_processor.word_vocab.vocab\r\n glove_embeddings_matrix = 
np.zeros([len(word_vocab), self._params.word_embedding_dim])\r\n for word, i in word_vocab.items():\r\n vector = glove_embedding_index.get(word)\r\n if vector is not None:\r\n glove_embeddings_matrix[i] = vector\r\n \r\n word_embeddings = Embedding(input_dim=glove_embeddings_matrix.shape[0],\r\n output_dim=glove_embeddings_matrix.shape[1],\r\n trainable=False,\r\n mask_zero=True,\r\n weights=[glove_embeddings_matrix],\r\n name='word_embedding')(word_ids)\r\n else:\r\n word_embeddings = Embedding(input_dim=self._params.word_vocab_size,\r\n output_dim=self._params.word_embedding_dim,\r\n mask_zero=True,\r\n name='word_embedding')(word_ids)\r\n\r\n input_embeddings = [word_embeddings]\r\n if self._params.use_char:\r\n char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')\r\n inputs.append(char_ids)\r\n if self._params.char_feature == \"lstm\":\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n mask_zero=True,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level lstm features\")\r\n char_feas = TimeDistributed(Bidirectional(LSTM(self._params.char_lstm_size)), name=\"char_lstm\")(char_embeddings)\r\n elif self._params.char_feature == \"cnn\":\r\n # cnn do not support mask\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level cnn features\")\r\n char_feas = char_cnn_encode(char_embeddings, self._params.n_gram_filter_sizes, self._params.n_gram_filter_nums)\r\n else:\r\n raise ValueError('char feature must be lstm or cnn')\r\n\r\n input_embeddings.append(char_feas)\r\n\r\n if self._params.use_pos:\r\n if verbose: logging.info(\"use pos tag features\")\r\n pos_ids = Input(batch_shape=(None, None), dtype='int32', name='pos_input')\r\n inputs.append(pos_ids)\r\n\r\n\r\n pos_embeddings = Embedding(input_dim=self._params.pos_vocab_size,\r\n output_dim=self._params.pos_embedding_dim,\r\n mask_zero=True,\r\n name='pos_embedding')(pos_ids)\r\n input_embeddings.append(pos_embeddings)\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, None), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n input_embeddings.append(dict_embeddings)\r\n\r\n input_embedding = Concatenate(name=\"input_embedding\")(input_embeddings) if len(input_embeddings)>1 else input_embeddings[0]\r\n input_embedding_ln = LayerNormalization(name='input_layer_normalization')(input_embedding)\r\n #input_embedding_bn = BatchNormalization()(input_embedding_ln)\r\n input_embedding_drop = Dropout(self._params.dropout, name=\"input_embedding_dropout\")(input_embedding_ln)\r\n\r\n z = Bidirectional(LSTM(units=self._params.main_lstm_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2),\r\n name=\"main_bilstm\")(input_embedding_drop)\r\n z = Dense(self._params.fc_dim, activation='tanh', name=\"fc_dense\")(z)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 
'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n model.compile(loss=loss, optimizer=self._params.optimizer)\r\n\r\n self.model = model", "def temporal_sampling(\n num_frames, start_idx, end_idx, num_samples, start_frame=0\n):\n index = torch.linspace(start_idx, end_idx, num_samples)\n index = torch.clamp(index, 0, num_frames - 1).long()\n return start_frame + index", "def positional_encoding(seq_len, embed_dim, timescale=10000):\n\n if embed_dim % 2 != 0:\n raise ValueError(\"Embedding dimension must be even\")\n\n positions = jnp.arange(seq_len)\n i = jnp.arange(embed_dim//2)\n angular_frequencies = 1/jnp.power(timescale, 2*i/embed_dim)\n\n angles = jnp.outer(positions, angular_frequencies)\n cosine = jnp.cos(angles) # seq_len, embed_dim // 2\n sine = jnp.sin(angles) # seq_len, embed_dim // 2\n\n pos_enc = jnp.concatenate([cosine, sine], axis=1)\n\n return pos_enc", "def ESIM(features: Dict[str, Field],\n targets: Dict[str, Field],\n text_embedder,\n lstm_units: int = 128,\n lstm_kwargs: Dict = None,\n hidden_units: int = 64,\n dropout: float = 0.5,\n label_field: str = 'label'):\n\n inputs = utils.create_inputs(features)\n input_premise = inputs['premise']\n input_hypothesis = inputs['hypothesis']\n embedded_premise = text_embedder(input_premise)\n embedded_hypothesis = text_embedder(input_hypothesis)\n\n lstm_kwargs = deepcopy(lstm_kwargs) if lstm_kwargs else {}\n lstm_kwargs.pop('return_sequences', None)\n lstm = tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(lstm_units, return_sequences=True, **lstm_kwargs))\n encoded_premise = lstm(embedded_premise)\n encoded_hypothesis = lstm(embedded_hypothesis)\n\n aligned_premise = Attention()([encoded_premise, encoded_hypothesis])\n aligned_hypothesis = Attention()([encoded_hypothesis, encoded_premise])\n\n diff_premise = tf.keras.layers.Subtract()(\n [encoded_premise, aligned_premise])\n mul_premise = tf.keras.layers.Multiply()(\n [encoded_premise, aligned_premise])\n combined_premise = tf.keras.layers.Concatenate()(\n [encoded_premise, aligned_premise, diff_premise, mul_premise])\n\n diff_hypothesis = tf.keras.layers.Subtract()(\n [encoded_hypothesis, aligned_hypothesis])\n mul_hypothesis = tf.keras.layers.Multiply()(\n [encoded_hypothesis, aligned_hypothesis])\n combined_hypothesis = tf.keras.layers.Concatenate()(\n [encoded_hypothesis, aligned_hypothesis, diff_hypothesis, mul_hypothesis])\n\n compose_lstm = tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(lstm_units, return_sequences=True, **lstm_kwargs))\n composed_premise = compose_lstm(combined_premise)\n composed_hypothesis = compose_lstm(combined_hypothesis)\n\n merged = tf.keras.layers.Concatenate()(\n [BOWEncoder(averaged=True)(composed_premise),\n tf.keras.layers.GlobalMaxPooling1D()(composed_premise),\n BOWEncoder(averaged=True)(composed_hypothesis),\n tf.keras.layers.GlobalMaxPooling1D()(composed_hypothesis)])\n if dropout:\n merged = tf.keras.layers.Dropout(dropout)(merged)\n if hidden_units:\n merged = tf.keras.layers.Dense(hidden_units, activation='tanh')(merged)\n probs = tf.keras.layers.Dense(len(targets[label_field].vocab),\n activation='softmax',\n name=label_field)(merged)\n return tf.keras.models.Model(inputs=list(inputs.values()),\n outputs=probs,\n name=\"ESIM\")", "def synthetic_seismogram(green, wavelet):\n return np.real(ifft(fft(wavelet) * fft(green)))", "def 
make_embedding_matrix(docs, size, min_count = 5, window = 5, n_iter = 5, savename = None, workers = 3):\n\n print('Starting the embedding generation')\n t0 = time.time()\n model = gensim.models.Word2Vec(docs, min_count=min_count, window = window,\n size = size, iter = n_iter, workers = workers)\n t1 = time.time()\n print('All done, total time %s' % (t1-t0))\n \n if savename is not None:\n model.save(savename)\n \n return model", "def init_word_embed(config):\n embedding_mat_val = np.load(config.wordembed_params)\n with tf.variable_scope('vc'):\n with tf.variable_scope('lstm', reuse=True):\n embedding_mat = tf.get_variable(\"embedding_mat\", [config.num_vocab, config.embed_dim])\n init_we = tf.assign(embedding_mat, embedding_mat_val)\n return [init_we]", "def mass_spring(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, noise_std, seed):\n\n def hamiltonian_fn(coords):\n q, p = np.split(coords, 2)\n\n H = (p ** 2) / 2 + (q ** 2) / 2 # spring hamiltonian (linear oscillator)\n return H\n\n def dynamics_fn(t, coords):\n dcoords = autograd.grad(hamiltonian_fn)(coords)\n dqdt, dpdt = np.split(dcoords, 2)\n S = np.concatenate([dpdt, -dqdt], axis=-1)\n return S\n\n def get_trajectory(t_span=[0, 3], timescale=0.01, ssr=sub_sample_rate, radius=None, y0=None, noise_std=0.1,\n **kwargs):\n\n # get initial state\n if y0 is None:\n y0 = np.random.rand(2) * 2 - 1\n if radius is None:\n radius = np.sqrt(np.random.uniform(0.5, 4.5))\n y0 = y0 / np.sqrt((y0 ** 2).sum()) * (radius)\n\n spring_ivp = rk(lambda t, y: dynamics_fn(t, y), t_span, y0,\n t_eval=np.arange(0, t_span[1], timescale),\n rtol=1e-12, atosl=1e-12, method='DOP853')\n\n accum = spring_ivp.y.T\n ssr = int(ssr / timescale)\n accum = accum[::ssr]\n\n daccum = [dynamics_fn(None, accum[i]) for i in range(accum.shape[0])]\n energies = []\n for i in range(accum.shape[0]):\n energies.append(np.sum(hamiltonian_fn(accum[i])))\n\n return accum, np.array(daccum), energies\n\n def get_dataset(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, seed=seed, test_split=0.5, **kwargs):\n data = {'meta': locals()}\n\n # randomly sample inputs\n np.random.seed(seed)\n data = {}\n ssr = int(sub_sample_rate / dt)\n\n xs, dxs, energies, ks, ms = [], [], [], [], []\n for s in range(num_trajectories):\n x, dx, energy = get_trajectory(t_span=[0, T_max], timescale=dt, ssr=sub_sample_rate)\n\n x += np.random.randn(*x.shape) * noise_std\n dx += np.random.randn(*dx.shape) * noise_std\n\n xs.append(x)\n dxs.append(dx)\n energies.append(energy)\n ks.append([1])\n ms.append([1])\n\n data['x'] = np.concatenate(xs)\n data['dx'] = np.concatenate(dxs)\n data['energy'] = np.concatenate(energies)\n data['ks'] = np.concatenate(ks)\n data['mass'] = np.concatenate(ms)\n\n f = open(name + \".pkl\", \"wb\")\n pickle.dump(data, f)\n f.close()\n\n return data\n\n return get_dataset(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate)", "def time_delay_embed(array, dimension, time_dif):\r\n emb = array.values # Converts the panda dataframe to an array\r\n emb = np.squeeze(np.asarray(emb)) # Make a 1-d array of all values\r\n i = len(emb) - 1 # sets up a counter\r\n new_vec = [] # target for each row\r\n embed = [] # target for full set\r\n while i >= dimension-1:\r\n a = 0 # the dimensional counter\r\n b = 0 # time_dif counter\r\n while a< dimension:\r\n new_vec.append(emb[i-b])\r\n a+=1\r\n b+= time_dif\r\n embed.append(new_vec)\r\n new_vec = []\r\n i -=1\r\n \r\n X = np.array(embed)\r\n \r\n return np.flipud(X)", "def generate_text(session, model, 
config, starting_text='<eos>',\n stop_length=100, stop_tokens=None, temp=1.0):\n state = model.initial_state.eval()\n # Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [model.vocab.encode(word) for word in starting_text.split()]\n for i in xrange(stop_length):\n ### YOUR CODE HERE\n #print tokens\n feed = {}\n #x = np.array([tokens[-1]])\n #x.reshape(1,1)\n feed[model.input_placeholder] = [[tokens[-1]]]\n feed[model.dropout_placeholder] = 1\n feed[model.initial_state] = state\n y_pred, state = session.run([model.predictions[-1], model.final_state], feed_dict=feed)\n ### END YOUR CODE\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [model.vocab.decode(word_idx) for word_idx in tokens]\n return output", "def build_sentence_encoder(self, raw_encoder_input, input_seq_len):\n with tf.variable_scope('text_encoder'):\n self.embedding = \\\n tf.get_variable(\n \"embedding\", initializer=tf.random_uniform(\n [self.config.word_voc_size,\n self.config.word_embedding_space_size],\n -self.config.TRAIN.SENCODER.none_rnn_para_initial_max,\n self.config.TRAIN.SENCODER.none_rnn_para_initial_max))\n inputs = tf.nn.embedding_lookup(self.embedding, raw_encoder_input)\n\n # now it is [MAX_SEQ_LENGTH, batch_size, embedding_length]\n input_batch_order = tf.transpose(inputs, [1, 0, 2])\n\n # now it is [MAX_SEQ_LENGTH * batch_size, embedding_length]\n input_batch_order = tf.reshape(\n input_batch_order, [-1, self.config.word_embedding_space_size])\n\n # now it is LIST OF [BATCH_SIZE, embedding_length]\n encoder_input = tf.split(0, self.config.seq_max_len,\n input_batch_order)\n\n # the encoder part\n encode_gru_cell = tf.nn.rnn_cell.GRUCell(\n self.config.encoder_dimension)\n # big news: The state is final state, output is a list of tensor.\n # We don't to do that\n _, sentence_rep = tf.nn.rnn(encode_gru_cell, encoder_input,\n dtype=tf.float32,\n sequence_length=input_seq_len)\n self.sentence_rep = sentence_rep\n self.sentence_rep = tf.nn.l2_normalize(self.sentence_rep, 1)\n return", "def generate_sentence(self, t=20):\n result = [\"START\", \"START\"]\n\n for i in range(t-3):\n if result[-1] == \"STOP\":\n break\n\n match = {}\n for k,v in self.trigramcounts.items():\n if k[0] == result[-2] and k[1] == result[-1]:\n match[k[-1]] = v\n r = np.random.choice(list(match.keys()), p=np.array(list(match.values())) / np.sum(np.array(list(match.values()))))\n result.append(r)\n\n return result", "def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"", "def generate_beam(env, dec, decoder_args, beam_size, length_penalty, early_stopping, max_len=100):\n\n # check inputs\n trg, enc_src, trg_mask, src_mask = decoder_args\n src_enc = enc_src\n src_len = enc_src \n\n #assert src_enc.size(0) == src_len.size(0)\n assert beam_size >= 1\n\n # batch size / number of words\n bs = len(src_enc)\n n_words = env.n_words\n breakpoint()\n\n # expand to beam size the source latent representations 
/ source lengths\n src_enc = src_enc.unsqueeze(1).expand((bs, beam_size) + src_enc.shape[1:]).contiguous().view((bs * beam_size,) + src_enc.shape[1:])\n #src_len = src_len.unsqueeze(1).expand(bs, beam_size).contiguous().view(-1)\n\n # generated sentences (batch with beam current hypotheses)\n #generated = src_len.new(max_len, bs * beam_size) # upcoming output\n #generated.fill_(env.pad_index) # fill upcoming ouput with <PAD>\n #generated[0].fill_(env.eos_index) # we use <EOS> for <BOS> everywhere\n\n # generated hypotheses\n generated_hyps = [BeamHypotheses(beam_size, max_len, length_penalty, early_stopping) for _ in range(bs)]\n\n # positions\n positions = src_len.new(max_len).long()\n #positions = torch.arange(max_len, out=positions).unsqueeze(1).expand_as(generated)\n\n # scores for each sentence in the beam\n beam_scores = src_enc.new(bs, beam_size).fill_(0)\n beam_scores[:, 1:] = -1e9\n beam_scores = beam_scores.view(-1)\n\n # current position\n cur_len = 1\n\n # cache compute states\n cache = {'slen': 0}\n\n # done sentences\n done = [False for _ in range(bs)]\n breakpoint()\n while cur_len < max_len:\n dec(trg[:,:-1], enc_src, trg_mask, src_mask)\n # compute word scores\n tensor = decoder(\n x=generated[:cur_len],\n lengths=src_len.new(bs * beam_size).fill_(cur_len),\n positions=positions[:cur_len],\n causal=True,\n src_enc=src_enc,\n src_len=src_len,\n cache=cache\n )\n assert tensor.size() == (1, bs * beam_size, env.dim)\n tensor = tensor.data[-1, :, :] # (bs * beam_size, dim)\n scores = env.proj(tensor) # (bs * beam_size, n_words)\n scores = F.log_softmax(scores, dim=-1) # (bs * beam_size, n_words)\n assert scores.size() == (bs * beam_size, n_words)\n\n # select next words with scores\n _scores = scores + beam_scores[:, None].expand_as(scores) # (bs * beam_size, n_words)\n _scores = _scores.view(bs, beam_size * n_words) # (bs, beam_size * n_words)\n\n next_scores, next_words = torch.topk(_scores, 2 * beam_size, dim=1, largest=True, sorted=True)\n assert next_scores.size() == next_words.size() == (bs, 2 * beam_size)\n\n # next batch beam content\n # list of (bs * beam_size) tuple(next hypothesis score, next word, current position in the batch)\n next_batch_beam = []\n\n # for each sentence\n for sent_id in range(bs):\n\n # if we are done with this sentence\n done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(next_scores[sent_id].max().item())\n if done[sent_id]:\n next_batch_beam.extend([(0, env.pad_index, 0)] * beam_size) # pad the batch\n continue\n\n # next sentence beam content\n next_sent_beam = []\n\n # next words for this sentence\n for idx, value in zip(next_words[sent_id], next_scores[sent_id]):\n\n # get beam and word IDs\n beam_id = idx // n_words\n word_id = idx % n_words\n\n # end of sentence, or next word\n if word_id == env.eos_index or cur_len + 1 == max_len:\n generated_hyps[sent_id].add(generated[:cur_len, sent_id * beam_size + beam_id].clone().cpu(), value.item())\n else:\n next_sent_beam.append((value, word_id, sent_id * beam_size + beam_id))\n\n # the beam for next step is full\n if len(next_sent_beam) == beam_size:\n break\n\n # update next beam content\n assert len(next_sent_beam) == 0 if cur_len + 1 == max_len else beam_size\n if len(next_sent_beam) == 0:\n next_sent_beam = [(0, env.pad_index, 0)] * beam_size # pad the batch\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == beam_size * (sent_id + 1)\n\n # sanity check / prepare next batch\n assert len(next_batch_beam) == bs * beam_size\n beam_scores = 
beam_scores.new([x[0] for x in next_batch_beam])\n beam_words = generated.new([x[1] for x in next_batch_beam])\n beam_idx = src_len.new([x[2] for x in next_batch_beam])\n\n # re-order batch and internal states\n generated = generated[:, beam_idx]\n generated[cur_len] = beam_words\n for k in cache.keys():\n if k != 'slen':\n cache[k] = (cache[k][0][beam_idx], cache[k][1][beam_idx])\n\n # update current length\n cur_len = cur_len + 1\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n # def get_coeffs(s):\n # roots = [int(s[i + 2]) for i, c in enumerate(s) if c == 'x']\n # poly = np.poly1d(roots, r=True)\n # coeffs = list(poly.coefficients.astype(np.int64))\n # return [c % 10 for c in coeffs], coeffs\n\n # visualize hypotheses\n # print([len(x) for x in generated_hyps], cur_len)\n # globals().update( locals() );\n # !import code; code.interact(local=vars())\n # for ii in range(bs):\n # for ss, ww in sorted(generated_hyps[ii].hyp, key=lambda x: x[0], reverse=True):\n # hh = \" \".join(self.id2word[x] for x in ww.tolist())\n # print(f\"{ss:+.4f} {hh}\")\n # # cc = get_coeffs(hh[4:])\n # # print(f\"{ss:+.4f} {hh} || {cc[0]} || {cc[1]}\")\n # print(\"\")\n\n # select the best hypotheses\n tgt_len = src_len.new(bs)\n best = []\n\n for i, hypotheses in enumerate(generated_hyps):\n best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]\n tgt_len[i] = len(best_hyp) + 1 # +1 for the <EOS> symbol\n best.append(best_hyp)\n\n # generate target batch\n decoded = src_len.new(tgt_len.max().item(), bs).fill_(env.pad_index)\n for i, hypo in enumerate(best):\n decoded[:tgt_len[i] - 1, i] = hypo\n decoded[tgt_len[i] - 1, i] = env.eos_index\n\n # sanity check\n assert (decoded == env.eos_index).sum() == 2 * bs\n\n return decoded, tgt_len, generated_hyps", "def build_generator(\n timesteps: int,\n latlon_dense_units: int,\n concat_dense_units: int,\n lstm_units: int,\n latent_dim: int,\n lstm_reg: float,\n vocab_sizes: Dict[str, int],\n):\n\n # Add random noise input\n inputs, emb_traj = build_inputs(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n latent_dim,\n vocab_sizes,\n noise=True,\n mask=True,\n )\n lstm_cell = layers.LSTM(\n units=lstm_units,\n batch_input_shape=(None, timesteps, latent_dim),\n return_sequences=True,\n recurrent_regularizer=regularizers.l1(lstm_reg),\n )(emb_traj)\n latlon_output = layers.TimeDistributed(\n layers.Dense(2, activation=\"tanh\"), name=\"output_latlon\"\n )(lstm_cell)\n outputs = [latlon_output]\n for key, val in vocab_sizes.items():\n out = layers.TimeDistributed(layers.Dense(val, activation=\"softmax\"), name=f\"output_{key}\")(\n lstm_cell\n )\n outputs.append(out)\n # pass the mask through so the loss function can use it\n outputs.append(layers.Lambda(lambda x: x, name=\"output_mask\")(inputs[-1]))\n return Model(inputs=inputs, outputs=outputs, name=\"generator\")", "def _forward_kinematics_step(self, t_step):\n (s_1, s_12, s_123) = self._sines\n (c_1, c_12, c_123) = self._cosines\n self._x_1[t_step] = self._jnt_lengths[0] * s_1\n self._y_1[t_step] = self._jnt_lengths[0] * c_1\n self._x_2[t_step] = self._x_1[t_step] + self._jnt_lengths[1] * s_12\n self._y_2[t_step] = self._y_1[t_step] + self._jnt_lengths[1] * c_12\n self._x_e[t_step] = self._x_2[t_step] + self._jnt_lengths[2] * s_123\n self._y_e[t_step] = self._y_2[t_step] + self._jnt_lengths[2] * c_123", "def generate(nextLayerUnits, FirstLayerUnits):\n array = mat.arange(start=1, stop=(nextLayerUnits * (FirstLayerUnits + 1)) + 1, step=1)\n debugWeights = 
mat.divide(mat.reshape(mat.sin(array), (nextLayerUnits, (FirstLayerUnits + 1))), 10)\n\n return debugWeights" ]
[ "0.66800493", "0.66085994", "0.60627913", "0.59043974", "0.5894194", "0.5874639", "0.58046216", "0.5689095", "0.5540392", "0.5476205", "0.54213727", "0.53775567", "0.53557706", "0.53395146", "0.5328101", "0.53141636", "0.53015023", "0.52998585", "0.52972686", "0.52835363", "0.5273984", "0.5270543", "0.5270075", "0.5261124", "0.52513915", "0.5243105", "0.5219595", "0.5210245", "0.5206666", "0.52066106", "0.5196728", "0.5189399", "0.5187871", "0.5182666", "0.5180434", "0.5177107", "0.5177018", "0.51650417", "0.5155953", "0.5155722", "0.5148351", "0.514743", "0.51445466", "0.5134532", "0.5122507", "0.51122826", "0.5109567", "0.5102241", "0.50906605", "0.5070854", "0.5066676", "0.50657994", "0.50475013", "0.504539", "0.50405777", "0.503911", "0.50358075", "0.5029917", "0.50294477", "0.5026132", "0.5022391", "0.50138533", "0.5010438", "0.5008929", "0.5005298", "0.4998422", "0.49905574", "0.49852285", "0.49831623", "0.4981315", "0.49626842", "0.49621692", "0.49616525", "0.49578393", "0.49515468", "0.49501365", "0.4945809", "0.49360585", "0.49353784", "0.49302042", "0.49228936", "0.49139273", "0.49106243", "0.4898138", "0.48971087", "0.4896118", "0.4891909", "0.4891738", "0.48852107", "0.48843768", "0.48807788", "0.48770827", "0.486757", "0.48669258", "0.4864973", "0.48619074", "0.4861617", "0.48571292", "0.48539457", "0.48521546" ]
0.65616596
2
Return a cached copy of TestShib's metadata by reading it from disk
def metadata_callback(_request, _uri, headers): return (200, headers, self.read_data_file('testshib_metadata.xml')) # lint-amnesty, pylint: disable=no-member
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_metadata(self):\n return copy.copy(self.metadata)", "def get_and_update_metadata():\n if not os.path.exists('.git') and os.path.exists(METADATA_FILENAME):\n with open(METADATA_FILENAME) as fh:\n metadata = json.load(fh)\n else:\n git = Git()\n revision = os.environ.get('TRAVIS_BUILD_NUMBER', git.revision)\n split_version = git.version.split('.')\n split_version[-1] = revision\n version = '.'.join(split_version)\n metadata = {\n 'version': version,\n 'git_hash': git.hash,\n 'git_origin': git.origin,\n 'git_branch': git.branch,\n 'git_version': git.version\n }\n with open(METADATA_FILENAME, 'w') as fh:\n json.dump(metadata, fh)\n return metadata", "def _metadata_get(self, path):\n fd = self.fs.open(path, \"r\")\n # TODO iterate instead of assuming file < 4MB\n read_bytes = self.fs.read(fd, 0, 4096 * 1024)\n self.fs.close(fd)\n if read_bytes:\n return json.loads(read_bytes.decode())\n else:\n return None", "def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])", "def metadata(self):\n return parse_metadata(self.metadata_path())", "def get_metadata(self):\n self.log = jsonLogs()\n log_filename = JSON_DIR + '/' + MEASUREMENTS_REPO + '/' + self.filename\n \n # keeping the first metadata read in the file\n # TODO : handling metadata changes during experiment ?\n meta = self.log.read_metadata(log_filename)\n return(meta[0])", "def get_metadata (self, name):\n return self.metadata.get(name)", "def read_mock_data(self):\n if self.name in self.bucket.mock_state():\n return self.bucket.mock_state()[self.name][0]\n else:\n raise boto.exception.S3ResponseError(404, 'Not Found')", "def metadata(self) -> Dict:\n # Lazy load the metadata\n if self._metadata is not None:\n return self._metadata\n\n # Initialize metadata\n self._metadata = {}\n # Find wich bucket the package belong to\n bucket_dir = os.path.join(self.scoop_root, \"buckets\")\n buckets = os.listdir(bucket_dir)\n metadata_json = None\n for bucket in buckets:\n metadata_file = os.path.join(\n bucket_dir, bucket, \"bucket\", f\"{self.name}.json\"\n )\n if os.path.isfile(metadata_file):\n with open(metadata_file) as file:\n metadata_json = json.load(file)\n break\n\n if metadata_json is None:\n logger.error(\"Could not find package metadata\")\n return self._metadata\n\n self._metadata = metadata_json\n return self._metadata", "def get_metadata_from_path(path):\n try:\n import yaml\n # assumes index card is in the top-level of path\n index_card = os.path.join(path, \"M_index.yaml\")\n with open(index_card, \"r\") as stream:\n file_info = yaml.safe_load(stream)\n\n metadata_dict = {}\n metadata_dict[\"book_id\"] = file_info[\"book_id\"]\n metadata_dict[\"timestamp_start\"] = file_info[\"start_time\"]\n metadata_dict[\"type\"] = file_info[\"type\"]\n metadata_dict[\"obsid\"] = _convert_book_id_to_obsid(file_info[\"book_id\"])\n # get optional bits\n if \"stop_time\" in file_info:\n metadata_dict[\"timestamp_end\"] = file_info[\"stop_time\"]\n if \"observatory\" in file_info:\n metadata_dict[\"observatory\"] = file_info[\"observatory\"]\n if \"telescope\" in file_info:\n metadata_dict[\"telescope\"] = file_info[\"telescope\"]\n if \"stream_ids\" in file_info:\n metadata_dict[\"stream_ids\"] = file_info[\"stream_ids\"]\n if \"subtype\" in file_info:\n metadata_dict[\"subtype\"] = file_info[\"subtype\"]\n if \"tags\" in file_info:\n metadata_dict[\"tags\"] = file_info[\"tags\"]\n if \"scanification\" in file_info:\n metadata_dict[\"scanification\"] = 
file_info[\"scanification\"]\n if \"hwp_rate_hz\" in file_info:\n metadata_dict[\"hwp_rate_hz\"] = file_info[\"hwp_rate_hz\"]\n if \"sequencer_ref\" in file_info:\n metadata_dict[\"sequencer_ref\"] = file_info[\"sequencer_ref\"]\n return metadata_dict\n except (ImportError, FileNotFoundError, KeyError):\n pass\n\n return None", "def readMetaInfo(self):\n\t\tdata = self._fileSystem.readMetaInfo()\n\t\treturn data", "def meta(self):\n if not hasattr(self, '_meta'):\n self._meta = {}\n meta_fn = os.path.join(self.path, 'meta.json')\n if os.path.exists(meta_fn):\n meta_file = open(meta_fn)\n try:\n self._meta.update(json.load(meta_file))\n finally:\n meta_file.close()\n return self._meta", "def get_metadata(self, filename):\n return self.execute_json(filename)[0]", "def get_downloads_metadata():\n global _METADATA\n if _METADATA is None:\n _METADATA = yaml.safe_load(resource_string(__name__, \"downloads.yml\"))\n return _METADATA", "def getfilemeta(path):\n if os.path.isfile(path):\n meta = os.stat(path)\n return (meta)\n else:\n raise Exception('File not exist')", "def get_metadata(self):\n return self.manager.get_metadata(self)", "def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None", "def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']", "def _load_metadata(self, datapath):\n try:\n metadata = Metadata(datapath)\n return metadata\n except RuntimeError:\n print('Metadata does not exist. Please double check your datapath.')\n return None", "def read_metadata(self, file_in_cache):\n metadata_file = self.get_metadata_file(file_in_cache)\n if self.context.is_file(metadata_file):\n return json.loads(auto_decode(self.context.read_file(metadata_file)))\n else:\n return {}", "def get_metadata_file(self, file_in_cache):\n return re.sub(r'\\.tar$', '.json', file_in_cache)", "def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. 
If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint", "def _fetch_current_local_metadata():\n if not os.path.exists(LOCAL_METADATA_FILE):\n return {}\n\n with open(LOCAL_METADATA_FILE) as f:\n return json.loads(f.read())", "def metadata(self):\n return copy.copy(self._metadata)", "def _init():\n cache_file = _get_buckets_cache_filename()\n exp = time.time() - S3_CACHE_EXPIRE\n\n # check mtime of the buckets files cache\n metadata = None\n try:\n if os.path.getmtime(cache_file) > exp:\n metadata = _read_buckets_cache_file(cache_file)\n except OSError:\n pass\n\n if metadata is None:\n # bucket files cache expired or does not exist\n metadata = _refresh_buckets_cache_file(cache_file)\n\n return metadata", "def GetMetadata(self):\n return self.dict['meta']", "def load_data(self):\n return self._load_data(\"--codemeta-file\")", "def create_fake(cls):\n source = pkg_resources.open_text('baseball_id', 'sample.master.csv',\n encoding='iso-8859-1')\n c = lookup.Cache(source)\n return c", "def get_metadata(self):\n return self._metadata", "def metadata_file(self):\n return self._metadata_file", "def get_cached(factory, cache_file_name, **kwargs):\n if os.path.exists(cache_file_name):\n _logger.info('Loading {}'.format(cache_file_name))\n cached = deserialize(cache_file_name)\n return cached\n\n _logger.info('Creating {}'.format(cache_file_name))\n data = factory()\n serialize(cache_file_name, data, **kwargs)\n return data", "def metadata(self):\n return copy.deepcopy(self._metadata)", "def read_data_cache_file(self):\n with open(self.cache_filename, 'r') as json_data:\n return json.load(json_data)", "def getMetadata(self):\n\n # keep variables local so they are not stored in memory\n meta, units = self.getDefaultMeta()\n\n # check each available file for header information\n # sequence is important since later calls overwrite earlier ones so if a header is present in \"psd\" and\n # \"data\", the value from \"data\" will be returned\n if self.ts:\n # get header data from file\n metaTmp, unitsTmp = self.ts.getMetadata()\n\n # make sure we don't override important stuff that by accident has the same name\n self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)\n self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)\n\n # set time series unit\n unitsTmp['timeseries'] = 'V'\n\n # update the dictionaries with newly found values\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n if self.psd:\n metaTmp, unitsTmp = self.psd.getMetadata()\n\n # make sure we don't override important stuff that by accident has the same name\n # also, 'nSamples' and 'samplingRate' in reality refer to the underlying timeseries data\n self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)\n self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)\n\n # set psd unit\n unitsTmp['psd'] = 'V^2 / Hz'\n\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n if self.data:\n metaTmp, unitsTmp = self.data.getMetadata()\n\n # rename variables for the sake of consistency and compatibility with Matlab and because the naming is\n # confusing: samplingRate is actually the 
acquisition rate since the DAQ card averages the data already\n # the sampling rate should describe the actual time step between data points not something else\n if 'recordingRate' in metaTmp:\n self.renameKey('samplingRate', 'acquisitionRate', meta=metaTmp, units=unitsTmp)\n self.renameKey('recordingRate', 'samplingRate', meta=metaTmp, units=unitsTmp)\n self.renameKey('nSamples', 'nAcquisitionsPerSample', meta=metaTmp)\n\n # add trial number\n metaTmp['trial'] = self.data.getTrialNumber()\n\n # update dictionaries\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n # add title string to metadata, used for plots\n self.setTitle(meta)\n\n # make sure all axes have the beadDiameter\n meta['pmY']['beadDiameter'] = meta['pmX']['beadDiameter']\n units['pmY']['beadDiameter'] = units['pmX']['beadDiameter']\n meta['aodY']['beadDiameter'] = meta['aodX']['beadDiameter']\n units['aodY']['beadDiameter'] = units['aodX']['beadDiameter']\n\n # add trap names\n meta['traps'] = meta.subDictKeys()\n\n return meta, units", "def read_metafile(path):\n with codecs.open(path, \"rb\", \"utf-8\") as f:\n return f.read()", "def read_data_cache(self):\n if os.path.exists(self.cache_filename):\n return self.read_data_cache_file()\n else:\n data = self._empty_data()\n self.write_data_cache(data)\n return data", "def _read_cache_file(self) -> bytes:\n with open(self.cache_file, 'rb') as file:\n return file.read()", "def _retrieveCachedData(self):", "def _fake_meta(self):\n resp = tju.load_file(UPLOADED_FILE, self.adpt)\n return vf.File.wrap(resp)", "def make_test_metadata(path):\n assert path, 'Please supply a nonempty path to store test dataset.'\n return create_test_dataset('file://{}'.format(path), range(ROWS_COUNT))", "def get_metadata(self, file_id):\n pass", "def data(self):\r\n if self.metadata is None:\r\n self.refresh()\r\n return self.metadata", "def get_metadata(self):\n return gdal.Open(self.filename).GetMetadata()", "def extract_metadata(self):\n metadata_file_path = self.create_metadata_file(\".metadata.txt\")\n mt = self.mimetype\n metadata_processing_method = self.metadata_mimetype_methods.get(mt)\n if metadata_processing_method:\n # TODO: should we return metadata and write it here instead of in processing method?\n metadata_processing_method(metadata_file_path)", "def get_metadata(\n self,\n digest: Optional[Digest] = None,\n ignore_errors: bool = True,\n ) -> BareAsset:\n ...", "def get(self):\n return self._metadata", "def get_meta(filename):\n with fiona.open(filename) as collection:\n return collection.meta", "def Metadata():\n def _CreateMetadata(unused_none):\n global _metadata\n if not _metadata:\n _metadata = _GCEMetadata()\n _metadata_lock.lock(function=_CreateMetadata, argument=None)\n _metadata_lock.unlock()\n return _metadata", "def meta_info(self):\n\n if not self.meta_was_built:\n self.__meta_info = self.build_meta(self.dataset_path, self.file_types)\n self.meta_was_built = True\n\n return self.__meta_info", "def get_metadata(self, idx=0):\n meta_data = {}\n with h5py.File(self.path) as h5:\n ds = h5[str(idx)]\n attrs = dict(ds.attrs)\n for key in qpimage.meta.META_KEYS:\n if key in attrs:\n meta_data[key] = attrs[key]\n\n smeta = super(SingleRawOAHQpformatHDF5, self).get_metadata(idx)\n meta_data.update(smeta)\n return meta_data", "def _metadata(self) -> Dict[str, Any]:\n return self.__metadata", "def data(self):\n if self._data is None:\n try:\n with open(self.storage_path, 'r') as cache_file:\n self._data = json.load(cache_file)\n except FileNotFoundError:\n self._data = {}\n return 
self._data", "def get_metadata(self):\n session_path = Path(self.source_data['folder_path'])\n session_id = session_path.stem\n metadata = NeuroscopeRecordingInterface.get_ecephys_metadata(\n xml_file_path=str((session_path / f\"{session_id}.xml\").absolute())\n )\n metadata.update(UnitProperties=[])\n return metadata", "def get_metadata(self):\n self.metadata = Metadata()\n document = openxmllib.openXmlDocument(path=self.path)\n self.metadata.add(document.allProperties, \"ooxml\")\n return self.metadata", "def get_metadata(self, tsid):\n return self._metadata.get(tsid)", "def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata", "def get_metadata(\n self,\n digest: Optional[Digest] = None,\n ignore_errors: bool = True,\n ) -> DandisetMeta:\n with open(self.filepath) as f:\n meta = yaml_load(f, typ=\"safe\")\n return DandisetMeta.unvalidated(**meta)", "def get_staticdata(self):\n return self.get_metadata()", "def tmpcache(self, tmpdir):\n cache = tmpdir.join('base-trusty-amd64.tgz')\n cache.write('testcachedata')\n return cache", "def read(self, file, path):\n pos, = struct.unpack('<Q', file.read(8))\n if pos == 0:\n raise VergeMLError(\"Invalid cache file: {}\".format(path))\n file.seek(pos)\n self.index, self.meta, self.info = pickle.load(file)", "def getImageInfo(self, path, timestamp=None):\n\n key = self.generateCacheKey(path, timestamp)\n if not key in self.cache:\n info = self.fetchInfo(path)\n self.cache[key] = info\n\n return self.cache[key]", "def _store_package_metadata(self):", "def refresh(self):\r\n self.metadata = self.db.read(self.path).json()", "def extract_metadata(self):\n if self.is_generatable_file:\n logger.debug(\"Converting collected details to dict..\")\n if self.metadata_collector:\n self.metadata = MetadataToDict(\n metadata_collector=self.metadata_collector,\n file_import=self.file_import,\n )\n self.metadata.build_integration_dict()", "def metadata(host_directory_path):\n return create_volume_string(host_directory_path, \"/bbx/metadata\", False)", "def get_preset_metadata(self, filename):\r\n\r\n raise NotImplementedError", "def getInfo(self):\n self.info = requests.get(G.api + self.testId + '/snapshots/' + self.hash, auth=(G.username, G.authkey)).json()\n return self.info", "def xontrib_metadata():\n impres = None\n pkg_resources = None\n\n # NOTE: Reduce all of these alternate implementations when the minimum Python\n # is >=3.7\n try:\n # Python 3.7\n import importlib.resources as impres\n except ImportError:\n try:\n # Optional backport for <3.7\n import importlib_resources as impres\n except ImportError:\n try:\n # Try the slower and clunkier pkg_resources\n # This is only available if setuptools is part of the environment\n import pkg_resources\n except ImportError:\n pass\n\n if impres:\n with impres.open_text(\"xonsh2\", \"xontribs.json\") as f:\n md = json.load(f)\n elif pkg_resources:\n # Despite the name, this is a bytes\n bytesdata = pkg_resources.resource_string(\"xonsh2\", \"xontribs.json\")\n md = json.loads(bytesdata.decode(\"utf-8\"))\n else:\n path = os.path.join(os.path.dirname(__file__), \"xontribs.json\")\n with open(path, \"r\") as f:\n md = json.load(f)\n\n return md", "def metadata(self):\r\n return self._metadata", "def test_filesystem_can_get_attributes_of_file(self):\n time.time = 
MagicMock(return_value=time.time())\n self.index.photos_directory_exists = MagicMock(return_value=False)\n self.index.photos_file_exists = MagicMock(\n return_value=123000 # returns filesize\n )\n\n expected = {\n 'st_atime': time.time(),\n 'st_ctime': time.time(),\n 'st_gid': os.getgid(),\n 'st_mode': File('').ST_MODE,\n 'st_mtime': time.time(),\n 'st_size': 123000,\n 'st_uid': os.getuid(),\n }\n\n attr = self.filesystem._attributes(\n '/example.com/2019-01-13H20:00/index.png'\n )\n self.assertEqual(expected, attr)\n self.index.photos_file_exists.assert_called_with(\n domain='example.com',\n captured_at='2019-01-13H20:00',\n full_filename='/index.png',\n refresh_rate=self.refresh_rate\n )", "def extract_metadata():\n\n create_output(ARGS.out)\n index = pre.pixz.read_index(ARGS.traffic)\n\n try:\n tmp = tempfile.mkdtemp(prefix=\"ictf2017_cache_\")\n print(\"Using temporary cache for extracted files at {}\".format(tmp))\n\n file_indexes = [i for i in range(len(index))\n if (i >= ARGS.start and i <= ARGS.stop)]\n\n # a wrapper which measures execution times and calculates eta\n eta = pre.timing.ETACalculator(len(file_indexes))\n\n for count, i in enumerate(file_indexes):\n print(\"\\nProcessing index {} from [{}, {}]\"\n .format(i, min(file_indexes), max(file_indexes)))\n\n def extract_read_append_remove():\n pcapfile = pre.pixz.extract_pcap(ARGS.traffic, index[i], tmp)\n metadata = pre.pcap.read(pcapfile)\n append_output(metadata, ARGS.out)\n os.remove(pcapfile)\n\n eta.execute(count, extract_read_append_remove)\n\n finally:\n shutil.rmtree(tmp)\n print(\"Cleaned up temporary cache {}\\n\\n\".format(tmp))", "def get_metadata(self):\n metadata = {}\n for k in self.metadata_keys:\n metadata[k] = copy.copy(getattr(self, k))\n return metadata", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def get_source_metadata(self, src_name: Union[str, SourceName]) -> Dict:\n if isinstance(src_name, SourceName):\n src_name = src_name.value\n if src_name in self._cached_sources:\n return self._cached_sources[src_name]\n else:\n metadata = self.metadata.get_item(Key={\"src_name\": src_name}).get(\"Item\")\n self._cached_sources[src_name] = metadata\n return metadata", "def get_metadata(self):\n return {}", "def metadata(self):\r\n return resources.Metadata(self)", "def get_metadata(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name))", "def get(self, path):\n\t\treturn self.cache.get(path)", "def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result", "def get_cache(self):\n self._topo.create_cache()\n with open('/run/geopm-service/geopm-topo-cache') as fid:\n result = fid.read()\n return result", "def _exif_data(self):\n return exif.get_metadata(self._filename)", "def load(self):\n if not path.exists('service.json'):\n raise UserError('service.json not found')\n with open('service.json') as f:\n try:\n metadata = json.loads(f.read())\n except Exception as e:\n raise UserError('malformed service.json - ' + str(e))\n return metadata", "def metadata(self, truncate: bool = False) -> Tuple[str, str]:\n\t\tif not self._closed:\n\t\t\tfilename = self.filename\n\t\t\tmd_filename = \"%s.file_md.json.gzip\" % (self.file_path)\n\t\t\tmd_mod_filename = \"%s.file_md.lastmod.gzip\" % (self.file_path)\n\t\t\tlogging.debug(\"Expanding metada (stored as 
%s.file_md.json.gzip)\", filename)\n\n\t\t\tlast_mod = self.last_modified()\n\t\t\tif os.path.isfile(md_filename):\n\t\t\t\tlogging.debug(\" Found previously extracted JSON file\")\n\t\t\t\tif truncate:\n\t\t\t\t\tself.clear_metadata()\n\t\t\t\telse:\n\t\t\t\t\tmd_json = load_gzipped_json_string(md_filename)\n\t\t\t\t\tmd_mod = load_gzipped_json_string(md_mod_filename)\n\t\t\t\t\tmd_parsed = json.loads(md_json)\n\t\t\t\t\t# check if cached metadata is up to date and\n\t\t\t\t\t# points to correct project folder and filename\n\t\t\t\t\t# if so return cache, otherwise clear it\n\t\t\t\t\tlogging.debug(\" md_mod: %s\", md_mod)\n\t\t\t\t\tlogging.debug(\" last_mod: %s\", last_mod)\n\t\t\t\t\tif md_mod != last_mod or md_parsed.project != self.project or md_parsed.filename != filename:\n\t\t\t\t\t\tself.clear_metadata()\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogging.debug(\" Cache up to date\")\n\t\t\t\t\t\treturn (md_json, last_mod)\n\n\t\t\tds = self.ds\n\t\t\tattrs = ds.attrs.keys()\n\t\t\ttitle = filename if \"title\" not in attrs else ds.attrs.title\n\t\t\tdescr = \"\" if \"description\" not in attrs else ds.attrs.description\n\t\t\turl = \"\" if \"url\" not in attrs else ds.attrs.url\n\t\t\tdoi = \"\" if \"doi\" not in attrs else ds.attrs.doi\n\t\t\t# converts compact ISO timestamps to human-readable ones.\n\t\t\t# Example: \"20180130T155028.262458Z\" becomes \"2018/01/13 15:50\"\n\t\t\tlast_mod_humanreadable = \"{}/{}/{} {}:{}:{}\".format(last_mod[0:4], last_mod[4:6], last_mod[6:8], last_mod[9:11], last_mod[11:13], last_mod[13:15])\n\t\t\t# default to last_modified for older files that do\n\t\t\t# not have a creation_date field\n\t\t\tcreation_date = last_mod_humanreadable if \"creation_date\" not in attrs else ds.attrs.creation_date\n\t\t\t# get arbitrary col/row attribute, they are all lists\n\t\t\t# of equal size. 
The length equals total cells/genes\n\t\t\ttotal_cells = ds.shape[1]\n\t\t\ttotal_genes = ds.shape[0]\n\n\t\t\tmd_data = {\n\t\t\t\t\"project\": self.project,\n\t\t\t\t\"filename\": filename,\n\t\t\t\t\"dataset\": filename,\n\t\t\t\t\"title\": title,\n\t\t\t\t\"description\": descr,\n\t\t\t\t\"url\": url,\n\t\t\t\t\"doi\": doi,\n\t\t\t\t\"creationDate\": creation_date,\n\t\t\t\t\"lastModified\": last_mod_humanreadable,\n\t\t\t\t\"totalCells\": total_cells,\n\t\t\t\t\"totalGenes\": total_genes,\n\t\t\t}\n\t\t\tlogging.debug(\" Saving extracted metadata as JSON file\")\n\t\t\tmd_json = json.dumps(md_data)\n\t\t\tsave_gzipped_json_string(md_filename, md_json)\n\t\t\tsave_gzipped_json_string(md_mod_filename, json.dumps(last_mod))\n\t\t\treturn (md_json, last_mod)\n\t\treturn None", "def read(self, source):\n _source = self._source_prefix+source\n return self.cache[_source]", "def get_contents( path, name, verbose = False, get_config=lambda: {} ):\n t_file_fh, t_file_name = tempfile.mkstemp()\n os.close(t_file_fh)\n try:\n fs_mod.fs_get( path+\"/\"+name, t_file_name, get_config )\n except:\n if verbose:\n print(\"get_contents exception:\",traceback.format_exc(), file=sys.stderr)\n return \"\"\n contents = open(t_file_name,\"r\").read()\n os.remove(t_file_name)\n return contents", "def get_metadata(self, keys, value, version=None):\n path = make_metadata_path(keys, value, version)\n url = '{root}/{path}'.format(root=self._root, path=path)\n\n try:\n r = requests.get(url)\n text = r.text\n\n self._write_cache(path, text)\n except (requests.ConnectionError, requests.Timeout):\n text = self._read_cache(path)\n\n try:\n data = yaml.load(text)\n except yaml.YAMLError:\n raise ValueError('Failed to read or parse YAML at %s' % url)\n\n return data", "def get_metadata(self):\n previous = DirectoryMetadata.load_pickle(self)\n metadata = {}\n\n for dirpath, dirnames, filenames in os.walk(self.prefix_dir):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n relative_path = path.split(self.base_dir, 1)[1]\n try:\n stats = os.stat(path)\n except OSError:\n log.exception('Error stating a file on disk while building up metadata, skipping file %s' % path)\n continue\n swift_bytes = stats.st_size\n mtime = datetime.utcfromtimestamp(stats.st_mtime)\n if (previous is not None) and (relative_path in previous.metadata) and\\\n (previous.metadata[relative_path].bytes == swift_bytes):\n swift_hash = previous.metadata[relative_path].hash\n else:\n try:\n with open(path, 'rb') as afile:\n md5_hash = hashlib.md5()\n md5_hash.update(afile.read())\n swift_hash = md5_hash.hexdigest()\n except OSError:\n log.exception('Error reading a file to create the md5 while building up metadata, skipping file %s' % path)\n continue\n\n metadata[relative_path] = FileMetadata(relative_path, swift_bytes, mtime, swift_hash)\n\n return metadata", "def getData(self, local_cache):", "def cache_body(self):\n with open(self.path, \"rb\") as fh:\n fh.seek(fh.tell(), os.SEEK_END)\n fh.seek(max(0, fh.tell()-LEN_CACHE_BYTES), os.SEEK_SET)\n return fh.read(LEN_CACHE_BYTES).decode('utf-8') #.split(\"\\n\")", "def read_cache(cc):\n \n out_file = os.path.join(cc.scene_dir, 'output', cc.scene_id+'_pickle')\n if cc.atmo_src == 'narr':\n out_file += '_narr'\n elif cc.atmo_src == 'merra':\n out_file += '_merra'\n \n if not os.path.isfile(out_file):\n raise OSError('pickle_file is not in expected location %s' % out_file) \n\n with open(out_file, 'rb') as f:\n x = pickle.load(f)\n return x", "def snapshot_info(self) -> MetaFile:\n raise 
NotImplementedError", "def _load_data(self):\n path = os.path.join(self._cache_path, '%s.data' % self._name)\n\n if not os.path.exists(path):\n raise IOError('Data cache missing at %s' % path)\n\n f = bz2.BZ2File(path)\n data = pickle.loads(f.read())\n f.close()\n\n return data", "def get_cached_item(self, path):\n item_path = '%s/%s' % (\n self.cache_folder,\n path.strip('/')\n )\n\n try:\n cf_object = self.container.get_object(item_path)\n except NoSuchObject:\n return False\n\n f = tempfile.NamedTemporaryFile()\n f.write(cf_object.fetch())\n f.seek(0)\n image = Image.open(f.name)\n f.close()\n\n return image", "def load_cache(self, filename):\n output_df = cudf.read_hdf(filename, key=self.uid)\n return output_df", "def metadata(self):\n return self._metadata", "def metadata(self):\n return self._metadata", "def metadata(self):\n return self._metadata", "def metadata(self):\n return self._metadata", "def metadata(self):\n return self._metadata", "def getCacheContents(self):\n return self._cache" ]
[ "0.6429627", "0.63208026", "0.6306309", "0.62240237", "0.6207978", "0.6194947", "0.6191376", "0.61298776", "0.61292166", "0.6115907", "0.60866165", "0.60811806", "0.6044135", "0.6037326", "0.6034143", "0.6017177", "0.5996634", "0.59623575", "0.594425", "0.5941117", "0.59093994", "0.5895662", "0.5874621", "0.5865159", "0.58533543", "0.5849117", "0.58464795", "0.5844417", "0.5842328", "0.58183736", "0.58047587", "0.5796988", "0.57916975", "0.5780975", "0.5772", "0.5770188", "0.57494277", "0.5718473", "0.5713579", "0.56958383", "0.56842667", "0.56742054", "0.56688726", "0.5663614", "0.56610334", "0.56586236", "0.56545603", "0.564257", "0.5640123", "0.5633285", "0.56324965", "0.5623618", "0.56233436", "0.56143415", "0.560669", "0.5606263", "0.5606039", "0.5605078", "0.56027263", "0.5598217", "0.55908996", "0.55894226", "0.5576002", "0.5574567", "0.5573856", "0.55681354", "0.55650425", "0.55634624", "0.5561554", "0.5558506", "0.55532783", "0.55496347", "0.5548082", "0.5548082", "0.5536122", "0.55330795", "0.5532461", "0.55292296", "0.55232304", "0.5506758", "0.5505173", "0.5504771", "0.5503365", "0.54978144", "0.5490455", "0.5479378", "0.5471154", "0.5464086", "0.54637057", "0.5462013", "0.5460128", "0.54563355", "0.54560167", "0.54494166", "0.54484713", "0.54420704", "0.54420704", "0.54420704", "0.54420704", "0.54420704", "0.54377925" ]
0.0
-1
Return a cached copy of TestShib's metadata with a cacheDuration attribute
def cache_duration_metadata_callback(_request, _uri, headers): return (200, headers, self.read_data_file('testshib_metadata_with_cache_duration.xml')) # lint-amnesty, pylint: disable=no-member
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])", "def get_metadata(self):\n return copy.copy(self.metadata)", "def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result", "def test_cache_datastore_manifests(self, cache_audio: bool):\n # Data setup\n random_seed = 42\n sample_rate = 16000\n num_examples = 10\n num_manifests = 2\n data_duration = 1.0\n\n # Generate random signals\n _rng = np.random.default_rng(seed=random_seed)\n\n # Input and target signals have the same duration\n data_duration_samples = int(data_duration * sample_rate)\n\n with tempfile.TemporaryDirectory() as test_dir:\n test_store_dir = os.path.join(test_dir, 'store')\n os.mkdir(test_store_dir)\n\n # Prepare metadata and audio files\n manifest_filepaths = []\n audio_files = []\n for m in range(num_manifests):\n manifest_dir = os.path.join(test_store_dir, f'manifest_{m}')\n os.mkdir(manifest_dir)\n manifest_filepath = os.path.join(manifest_dir, 'manifest.json')\n\n metadata = []\n data = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples, num_examples))\n for n in range(num_examples):\n audio_filepath = f'manifest_{m}_audio_{n:02d}.wav'\n audio_file = os.path.join(manifest_dir, audio_filepath)\n # Write audio file\n sf.write(audio_file, data[:, n], sample_rate, 'float')\n # Update metadata\n metadata.append(\n {\n 'audio_filepath': audio_filepath,\n 'duration': data_duration,\n 'text': f'text for example {n:02d}',\n }\n )\n # Update audio files\n audio_files.append(audio_file)\n\n # Save manifest\n write_manifest(manifest_filepath, metadata)\n manifest_filepaths.append(manifest_filepath)\n\n # Cache location\n test_cache_dir = os.path.join(test_dir, 'cache')\n\n # Instead of using AIS, copy object from store dir to cache dir\n def fake_get(self):\n # Object path relative to store path\n object_path = os.path.relpath(self.store_path, start=test_store_dir)\n # Copy to fake local path\n self._local_path = os.path.join(test_cache_dir, object_path)\n os.makedirs(os.path.dirname(self.local_path), exist_ok=True)\n shutil.copy(self.store_path, self.local_path)\n # Return path as in the original get\n return self.local_path\n\n with mock.patch(\n 'nemo.collections.asr.data.audio_to_text.is_datastore_path', lambda x: True\n ), mock.patch.object(DataStoreObject, 'get', fake_get):\n # Use a single worker for this test to avoid failure with mock & multiprocessing (#5607)\n cache_datastore_manifests(manifest_filepaths, cache_audio=cache_audio, num_workers=1)\n\n # Manifests need to be compared\n store_files_to_compare = manifest_filepaths\n if cache_audio:\n # Audio needs to be compared\n store_files_to_compare += audio_files\n\n # Compare files\n for f_store in store_files_to_compare:\n f_cache = os.path.join(test_cache_dir, os.path.relpath(f_store, test_store_dir))\n assert filecmp.cmp(f_store, f_cache, shallow=False), f'Files {f_store} and {f_cache} do not match.'", "def generate_statistics():\r\n statistics = cache.get('statistics')\r\n if statistics is None:\r\n statistics = {}\r\n statistics['nr_hashtags'] = ('Number of Hashtags',\r\n get_number_hashtags())\r\n statistics['nr_tokens'] = ('Number of Tokens', get_token_count())\r\n statistics['media_storage_size'] = ('Storage Folder Size (MB)',\r\n str(get_folder_size(\r\n cfg['media_storage'])))\r\n\r\n cache.set('statistics', statistics,\r\n cfg['flask_cache_timeout'] * 60)\r\n\r\n return statistics", "def test_set_cache_timeout():\n my_accessor = 
RallyAccessor('uname', 'pword', 'base_url')\n my_accessor.set_cache_timeout('object_name', 10)\n\n assert_equal(my_accessor.cache_timeouts, {'object_name': 10})", "def extract_metadata():\n\n create_output(ARGS.out)\n index = pre.pixz.read_index(ARGS.traffic)\n\n try:\n tmp = tempfile.mkdtemp(prefix=\"ictf2017_cache_\")\n print(\"Using temporary cache for extracted files at {}\".format(tmp))\n\n file_indexes = [i for i in range(len(index))\n if (i >= ARGS.start and i <= ARGS.stop)]\n\n # a wrapper which measures execution times and calculates eta\n eta = pre.timing.ETACalculator(len(file_indexes))\n\n for count, i in enumerate(file_indexes):\n print(\"\\nProcessing index {} from [{}, {}]\"\n .format(i, min(file_indexes), max(file_indexes)))\n\n def extract_read_append_remove():\n pcapfile = pre.pixz.extract_pcap(ARGS.traffic, index[i], tmp)\n metadata = pre.pcap.read(pcapfile)\n append_output(metadata, ARGS.out)\n os.remove(pcapfile)\n\n eta.execute(count, extract_read_append_remove)\n\n finally:\n shutil.rmtree(tmp)\n print(\"Cleaned up temporary cache {}\\n\\n\".format(tmp))", "def cache(self):\n return self.payload.setdefault(self._CACHE_ATTRIBUTE, {})", "def cache(self):\n return {'output': self.output, 'series': self.series}", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def test_custom_cache(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n custom_cache = {}\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def Metadata():\n def _CreateMetadata(unused_none):\n global _metadata\n if not _metadata:\n _metadata = _GCEMetadata()\n _metadata_lock.lock(function=_CreateMetadata, argument=None)\n _metadata_lock.unlock()\n return _metadata", "def _init():\n cache_file = _get_buckets_cache_filename()\n exp = time.time() - S3_CACHE_EXPIRE\n\n # check mtime of the buckets files cache\n metadata = None\n try:\n if os.path.getmtime(cache_file) > exp:\n metadata = _read_buckets_cache_file(cache_file)\n except OSError:\n pass\n\n if metadata is None:\n # bucket files cache expired or does not exist\n metadata = _refresh_buckets_cache_file(cache_file)\n\n return metadata", "def get_cache(self):\n return self.cache", "def get_cache(self):\n return self._instance._cache[self.name]", "def get_metadata(self):\n metadata = {}\n for k in 
self.metadata_keys:\n metadata[k] = copy.copy(getattr(self, k))\n return metadata", "def metadata(self):\n return copy.copy(self._metadata)", "def GetMetadata(self):\n return self.dict['meta']", "def metadata(self):\n return copy.deepcopy(self._metadata)", "def metadata_processor(self):\n counts = {key: int(value) for key, value in\n self.redis.hgetall(self.metadata_cache_key).iteritems()}\n\n counts['cached'] = len(self.tweet_cache)\n\n metadata = {'counts': counts}\n log.debug(metadata)\n\n if self.is_queuing:\n rqworker.enqueue(self.metadata_processor_fct, metadata)\n else:\n self.metadata_processor_fct(metadata)", "def metadata(self, metadata):\n return Metadata(metadata)", "def for_shard(self) -> \"BenchmarkMetadata\":\n new_metadata = BenchmarkMetadata.create(self.suite_name)\n kwargs = asdict(new_metadata)\n keep_new_fields = [\n \"argv\",\n \"user\",\n \"hostname\",\n \"ram\",\n \"cpu_name\",\n \"cpu_count\",\n \"cpu_frequency\",\n \"gpu_names\",\n ]\n keep_old_fields = [\"timestamp\", \"run_id\"]\n must_match_fields = [\n \"suite_name\",\n \"py_ver\",\n \"tf_ver\",\n \"np_ver\",\n \"git_branch_name\",\n \"git_commit\",\n ]\n for field in keep_old_fields:\n del kwargs[field]\n for field in must_match_fields:\n assert getattr(self, field) == kwargs[field], (\n f\"Field {field} must match between new and old metadata.\"\n f\" Found {getattr(self, field)} and {kwargs[field]}\"\n )\n del kwargs[field]\n assert keep_new_fields == list(kwargs)\n\n return replace(self, **kwargs)", "def stats(self):\n if self.__cache:\n return {\n \"size\": self.__cache.currsize,\n \"maxsize\": self.__cache.maxsize,\n \"hits\": self._hits._value.get(),\n \"miss\": self._misses._value.get(),\n }\n else:\n return super(MemoryCache, self).stats()", "def metadata(self) -> global___SummaryMetadata:", "def cache(self) -> Optional[Sequence['outputs.SettingsPropertiesResponseCache']]:\n return pulumi.get(self, \"cache\")", "def getMetadata(self):\n\n # keep variables local so they are not stored in memory\n meta, units = self.getDefaultMeta()\n\n # check each available file for header information\n # sequence is important since later calls overwrite earlier ones so if a header is present in \"psd\" and\n # \"data\", the value from \"data\" will be returned\n if self.ts:\n # get header data from file\n metaTmp, unitsTmp = self.ts.getMetadata()\n\n # make sure we don't override important stuff that by accident has the same name\n self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)\n self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)\n\n # set time series unit\n unitsTmp['timeseries'] = 'V'\n\n # update the dictionaries with newly found values\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n if self.psd:\n metaTmp, unitsTmp = self.psd.getMetadata()\n\n # make sure we don't override important stuff that by accident has the same name\n # also, 'nSamples' and 'samplingRate' in reality refer to the underlying timeseries data\n self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)\n self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)\n\n # set psd unit\n unitsTmp['psd'] = 'V^2 / Hz'\n\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n if self.data:\n metaTmp, unitsTmp = self.data.getMetadata()\n\n # rename variables for the sake of consistency and compatibility with Matlab and because the naming is\n # confusing: samplingRate is actually the acquisition rate since the DAQ card averages the data already\n # the sampling rate should describe the actual time step 
between data points not something else\n if 'recordingRate' in metaTmp:\n self.renameKey('samplingRate', 'acquisitionRate', meta=metaTmp, units=unitsTmp)\n self.renameKey('recordingRate', 'samplingRate', meta=metaTmp, units=unitsTmp)\n self.renameKey('nSamples', 'nAcquisitionsPerSample', meta=metaTmp)\n\n # add trial number\n metaTmp['trial'] = self.data.getTrialNumber()\n\n # update dictionaries\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n # add title string to metadata, used for plots\n self.setTitle(meta)\n\n # make sure all axes have the beadDiameter\n meta['pmY']['beadDiameter'] = meta['pmX']['beadDiameter']\n units['pmY']['beadDiameter'] = units['pmX']['beadDiameter']\n meta['aodY']['beadDiameter'] = meta['aodX']['beadDiameter']\n units['aodY']['beadDiameter'] = units['aodX']['beadDiameter']\n\n # add trap names\n meta['traps'] = meta.subDictKeys()\n\n return meta, units", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def test_cache_stats(self):\n data = {'a': 'b'}\n stats_file = self._write('stats.json', data)\n settings = {\n 'webpack.stats_file': stats_file,\n }\n state = WebpackState(settings)\n stats = state.load_stats(cache=True)\n self.assertEqual(data, stats)\n with open(stats_file, 'w') as ofile:\n json.dump({'b': 'c'}, ofile)\n second_stats = state.load_stats(cache=True)\n self.assertEqual(second_stats, stats)", "def metadata_from_str(self, metadata_str):\n with self._json_cache_lock:\n metadata = self._json_cache.get(metadata_str)\n if not metadata:\n metadata = bg_metric.MetricMetadata.from_json(metadata_str)\n self._json_cache[metadata_str] = metadata\n return metadata", "def getMetaData(self, outputDir = None, filetype = 'h5'):\n timeproc = np.array(self.meta['Time Processed'])\n timedisp = np.array(self.meta['Time Displayed'])\n timeread = np.array(self.meta['Time Read'])\n self.meta['Processing Time'] = timeproc - timeread\n self.meta['Displaying Time'] = timedisp - timeproc\n self.meta['Total Time'] = timedisp - timeread\n metaData = pd.DataFrame.from_dict(self.meta)\n if not outputDir == None:\n if filetype == 'h5':\n fileName = outputDir + 'metaData_{}.h5'.format(str(datetime.today().now())[:-7])\n metaData.to_hdf(fileName, key='metaData')\n elif filetype == 'csv':\n fileName = outputDir + 'metaData_{}.csv'.format(str(datetime.today().now())[:-7])\n metaData.to_csv(fileName, key='metaData')\n return metaData", "def stats(self):\n return super(NoneCache, self).stats()", "def test_cache_set_without_timeout(self):\n self.cache.set('superman', 'clark kent')\n self.cache.set('recipe', {'sugar': 2, 'wine': 5})\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})\n\n # Move time forward 10 years\n cache.datetime.now = lambda: datetime.now() + timedelta(days=10*365)\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})", "def _retrieveCachedData(self):", "def test_summary(self):\n cache = DummyCache()\n cache.upload(\"pkg1-0.3.tar.gz\", BytesIO(b\"test1234\"))\n cache.upload(\"pkg1-1.1.tar.gz\", BytesIO(b\"test1234\"))\n p1 = cache.upload(\n \"pkg1a2.tar.gz\", BytesIO(b\"test1234\"), \"pkg1\", \"1.1.1a2\", \"summary\"\n )\n p2 = cache.upload(\n \"pkg2.tar.gz\", BytesIO(b\"test1234\"), \"pkg2\", \"0.1dev2\", \"summary\"\n )\n summaries = cache.summary()\n self.assertCountEqual(\n 
summaries,\n [\n {\n \"name\": \"pkg1\",\n \"summary\": \"summary\",\n \"last_modified\": p1.last_modified,\n },\n {\n \"name\": \"pkg2\",\n \"summary\": \"summary\",\n \"last_modified\": p2.last_modified,\n },\n ],\n )", "def metadata(self):\r\n return resources.Metadata(self)", "def getCacheAttribute(animationCurve):\n cacheAttrName = '{}.{}'.format(animationCurve, constants.ANIMCACHEATTR)\n if not cmds.objExists(cacheAttrName):\n cmds.addAttr(animationCurve, longName=constants.ANIMCACHEATTR, dt='vectorArray')\n return cacheAttrName", "def get_metadata (self, name):\n return self.metadata.get(name)", "def stats():\n global CACHE, STATS_MISSES, STATS_HITS, STATS_KEYS_COUNT\n memory_address = \"0x\" + str(\"%X\" % id( CACHE )).zfill(16)\n return {'cache_memory_address': memory_address,\n 'hits': STATS_HITS,\n 'misses': STATS_MISSES ,\n 'keys_count': STATS_KEYS_COUNT,\n }", "def getCurrentCacheData(self):\n return self.getCacheData(int(self.currentFrameNumber - 1))", "def get_cache_info(self):\n\t\tdb_cursor = self.cache.query_source(self.name,\n\t\t\t[\"count(*)\", \"min(COLLECTED_DATE)\", \"max(COLLECTED_DATE)\"])\n\t\t(count, min_date, max_date) = db_cursor.fetchone()\n\t\treturn self.name, str(count), str(min_date), str(max_date)", "def get_metadata(self):\n return self._metadata", "def _metadata(self) -> Dict[str, Any]:\n return self.__metadata", "def test_custom_expire(self):\n settings = {\n 'webpack.cache_max_age': '1234',\n }\n state = WebpackState(settings, 'mypackage')\n self.assertEqual(state.cache_max_age, 1234)", "def get_cached(factory, cache_file_name, **kwargs):\n if os.path.exists(cache_file_name):\n _logger.info('Loading {}'.format(cache_file_name))\n cached = deserialize(cache_file_name)\n return cached\n\n _logger.info('Creating {}'.format(cache_file_name))\n data = factory()\n serialize(cache_file_name, data, **kwargs)\n return data", "def metadata(self):\n return parse_metadata(self.metadata_path())", "def info_cache():\n return [custom_hit, custom_miss, len(custom_memory), total_custom_memory]", "def get_metadata(self):\n self.log = jsonLogs()\n log_filename = JSON_DIR + '/' + MEASUREMENTS_REPO + '/' + self.filename\n \n # keeping the first metadata read in the file\n # TODO : handling metadata changes during experiment ?\n meta = self.log.read_metadata(log_filename)\n return(meta[0])", "def get_source_metadata(self, src_name: Union[str, SourceName]) -> Dict:\n if isinstance(src_name, SourceName):\n src_name = src_name.value\n if src_name in self._cached_sources:\n return self._cached_sources[src_name]\n else:\n metadata = self.metadata.get_item(Key={\"src_name\": src_name}).get(\"Item\")\n self._cached_sources[src_name] = metadata\n return metadata", "def get_metadata(\n self,\n digest: Optional[Digest] = None,\n ignore_errors: bool = True,\n ) -> BareAsset:\n ...", "def cached(seconds, cattr=None):\n def wrapper(f):\n def function(*args, **kwargs):\n # Construct key from the function id and arguments.\n if cattr is None:\n key = (id(f), args, tuple(kwargs.items()))\n else:\n # Used for class methods, change args[0] to an attribute.\n kargs = args[1:]\n key = (id(f), id(type(args[0])),\n getattr(args[0], cattr), kargs,\n tuple(kwargs.items()))\n needresults = False\n with cache_lock:\n if key not in cache:\n needresults = True\n if needresults:\n results = f(*args, **kwargs)\n with cache_lock:\n cache[key] = (time.time(), results, seconds)\n return cache[key][1]\n return function\n\n return wrapper", "def _cache_get(self, metric_name):\n pass", "def 
testStats(self):\n\n stats = memcache.get_stats()\n self.assertEqual(\n set(['hits', 'items', 'bytes', 'oldest_item_age', 'misses',\n 'byte_hits']),\n set(stats.keys()))", "def get_cached_data_age(name):\n cache_path = get_cachefile('%s.cache' % name)\n if not os.path.exists(cache_path):\n return 0\n return time.time() - os.stat(cache_path).st_mtime", "def cache():\n if request.method == 'GET':\n cache_info = in_water.cache_info()\n return json.dumps({\n 'hits': cache_info.hits,\n 'misses': cache_info.misses,\n 'maxsize': cache_info.maxsize,\n 'currsize': cache_info.currsize,\n })", "def __init__(self, accessor, settings, name=None):\n super(MemoryCache, self).__init__(accessor, settings, name)\n self.__size = settings.get(\"size\", 1 * 1000 * 1000)\n self.__ttl = int(settings.get(\"ttl\", 24 * 60 * 60))\n self._max_size.set(self.__size)\n self.__cache = None", "def make_cache_table(metadata, table_name='beaker_cache', schema_name=None):\n return sa.Table(table_name, metadata,\n sa.Column('namespace', sa.String(255), primary_key=True),\n sa.Column('accessed', sa.DateTime, nullable=False),\n sa.Column('created', sa.DateTime, nullable=False),\n sa.Column('data', sa.PickleType, nullable=False),\n schema=schema_name if schema_name else metadata.schema)", "def get_metadata(self, object_id):\n buff = libplasma.get(self.conn, object_id)[1]\n return PlasmaBuffer(buff, object_id, self)", "def get_metadata(self):\n return {}", "def get_cache_path(self):", "def get_cache_path(self):", "def get_metadata(self):\n return self.manager.get_metadata(self)", "def load_cache():\n return {}", "def cache(self, name: str = None) -> B[B, E]:", "def test_untimed(self):\n cache = TimedCache()\n for i in range(500):\n cache[i] = i\n for i in range(500):\n assert i in cache\n assert cache[i] == i", "def cache_path(self):", "def cache_path(self):", "def test_cache_retrieved(self):\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n data = read.data.decode()\n self.assertIn(\n '<span class=\"expan\">et </span>', data,\n \"Text content should be transformed\"\n )\n self.assertIn(\n 'Facsimilaire', data,\n \"Other content should be added\"\n )\n\n cached = self.cache.get(\"urn:cts:froLit:jns915.jns1856.ciham-fro1:1\").decode()\n self.assertIn('<aside class=\"text-left\">', cached, \"Assert cache is made\")\n\n with mock.patch(\"nemo_xslttwo_plugin.shell\") as shell:\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n cached_response = read.data.decode()\n self.assertEqual(\n cached_response, data,\n \"Text content should the same in cache\"\n )\n self.assertEqual(\n shell.call_count, 0,\n \"Shell should not be called because we use cache\"\n )", "def __init__(self, location, option):\n super(MyCache, self).__init__(location, option)\n self.dcreate('ttl')", "def metadata_legacy(self):\n assert len(self.containers) == 1\n metadata = self.containers[0].metadata.copy()\n for k, v in self.build_config.items():\n assert k not in metadata\n metadata[k] = v\n return metadata", "def get_top_song_metadata():\n cache_buster = '?v=%s' % get_timestamp()\n response = requests.get(MUSIC_DIR + 'top_meta.json' + cache_buster)\n return response.json()", "def get_metadata(self, tsid):\n return self._metadata.get(tsid)", "def get_cache():\n if not Genre._cache:\n Genre._cache = ObjectCache(Genre().__class__)\n return Genre._cache", "def _get_metadata(self): \n metadata = {'DATA_TYPE':'Fourier Climatology'} \n \n area_bounds = self._area_inst.get_cube_area_bounds(self.cube, \n self.xy_coords)\n 
x_bounds = [area_bounds[self._area_inst.x_min], \n area_bounds[self._area_inst.x_max]]\n y_bounds = [area_bounds[self._area_inst.y_min], \n area_bounds[self._area_inst.y_max]]\n \n metadata['VARIABLE'] = self.cube.name()\n metadata['UNITS'] = str(self.cube.units)\n metadata['INITIALISATION_DATES'] = self.cube_init_dates\n metadata['DATES'] = self.cube_dates\n metadata[self.xy_coords[0].upper()+'_BOUNDS'] = x_bounds\n metadata[self.xy_coords[-1].upper()+'_BOUNDS'] = y_bounds\n \n # Find additional coordinates in cube and add them to metadata.\n for coord in self.cube.coords():\n if coord.name() not in self.unwanted_coords and \\\n coord.name() not in self._required_coords and \\\n coord.name() not in self.xy_coords:\n metadata[coord.name().upper()] = coord.points\n \n bound_names = [self.xy_coords[0].upper()+'_BOUNDS',\n self.xy_coords[-1].upper()+'_BOUNDS']\n \n return self.MetaData(metadata, bound_names)", "def unit_metadata(self):\n data = self.to_dict()\n metadata = [(k, v) for k, v in data.items() if k not in UNIT_KEYS]\n return metadata", "def update_cache():\n total = isic.get_image_count()\n df = pd.DataFrame()\n for meta in tqdm(isic.get_image_meta(), total=total, desc=\"Updating image list cache\", unit=\"img\"):\n _meta = json_normalize(meta)\n # File name and directory will be named after dataset.name.\n # Make it file system friendly. e.g. remove chars like /\n # Todo: This append stuff is painfully slow... Do something about it\n _meta[\"dataset.name\"] = slugify(_meta[\"dataset.name\"][0])\n df = df.append(_meta, ignore_index=True)\n df.to_csv(cache_file, index=False)", "def get_resource_cache_stats_provider() -> CacheStatsProvider:\n return _resource_caches", "def test_timed(self):\n time = 0.001\n cache = TimedCache(max_age=time)\n\n cache[1] = 1\n assert 1 in cache\n sleep(time)\n assert 1 not in cache\n with pytest.raises(KeyError):\n assert cache[1]\n\n for i in range(50):\n cache[i] = i\n assert i in cache\n assert cache[i] == i\n sleep(time)\n for i in range(50):\n assert i not in cache\n with pytest.raises(KeyError):\n assert cache[i]", "def test_install_metadata(tmp_path):\n msbuild_path = tmp_path / \"Visual Studio\" / \"MSBuild.exe\"\n install_metadata = {\n \"instanceId\": \"deadbeef\",\n \"installDate\": \"2022-07-14T10:42:37Z\",\n }\n visualstudio = VisualStudio(\n MagicMock(spec_set=ToolCache),\n msbuild_path=msbuild_path,\n install_metadata=install_metadata,\n )\n\n assert visualstudio.install_metadata[\"instanceId\"] == \"deadbeef\"\n assert visualstudio.install_metadata[\"installDate\"] == \"2022-07-14T10:42:37Z\"", "def metadata(self): # -> None:\n ...", "def cache_key(self):", "def fluidCacheInfo(*args, attribute: Union[AnyStr, bool]=\"\", cacheTime: Union[time, bool]=None,\n endFrame: bool=True, hasCache: bool=True, hasData: bool=True,\n initialConditions: bool=True, playback: bool=True, resolution: bool=True,\n startFrame: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass", "def metadata(self) -> Metadata:\n return self._metadata", "def _get_cache_ttl(self, request, response):\n return None # use default ttl", "def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']", "def get_benchmark_metadata(self, name, namespace=\"benchmark-operator\"):\n benchmark = self.get_benchmark(name, namespace)\n return {\n \"name\": benchmark[\"metadata\"][\"name\"],\n \"namespace\": 
benchmark[\"metadata\"][\"namespace\"],\n \"uuid\": benchmark.get(\"status\", {}).get(\"uuid\", \"Not Assigned Yet\"),\n \"suuid\": benchmark.get(\"status\", {}).get(\"suuid\", \"Not Assigned Yet\"),\n \"status\": benchmark.get(\"status\", {}).get(\"state\", \"\"),\n }", "def metadata(self) -> Dict:\n # Lazy load the metadata\n if self._metadata is not None:\n return self._metadata\n\n # Initialize metadata\n self._metadata = {}\n # Find wich bucket the package belong to\n bucket_dir = os.path.join(self.scoop_root, \"buckets\")\n buckets = os.listdir(bucket_dir)\n metadata_json = None\n for bucket in buckets:\n metadata_file = os.path.join(\n bucket_dir, bucket, \"bucket\", f\"{self.name}.json\"\n )\n if os.path.isfile(metadata_file):\n with open(metadata_file) as file:\n metadata_json = json.load(file)\n break\n\n if metadata_json is None:\n logger.error(\"Could not find package metadata\")\n return self._metadata\n\n self._metadata = metadata_json\n return self._metadata", "def setup(cls, path, cache_filename, **kwargs):\n cache_filepath = os.path.join(path, cache_filename)\n if not os.path.isfile(cache_filepath):\n with open(cache_filepath, 'w') as cache_file:\n json.dump({'start_time': None}, cache_file)", "def get_and_update_metadata():\n if not os.path.exists('.git') and os.path.exists(METADATA_FILENAME):\n with open(METADATA_FILENAME) as fh:\n metadata = json.load(fh)\n else:\n git = Git()\n revision = os.environ.get('TRAVIS_BUILD_NUMBER', git.revision)\n split_version = git.version.split('.')\n split_version[-1] = revision\n version = '.'.join(split_version)\n metadata = {\n 'version': version,\n 'git_hash': git.hash,\n 'git_origin': git.origin,\n 'git_branch': git.branch,\n 'git_version': git.version\n }\n with open(METADATA_FILENAME, 'w') as fh:\n json.dump(metadata, fh)\n return metadata", "def testExpirationTime(self):\n\n bye = \"Good bye!\"\n memcache.add('bye', bye, 1)\n assert memcache.get('bye') == bye\n time.sleep(2)\n assert memcache.get('bye') == None", "def build_metadata(meta):\n\n ret = copy.copy(meta) if meta else dict()\n\n ret['name'] = meta.get('name', '')\n\n if 'index' in meta:\n if isinstance(meta.get('index'), str):\n ret['index'] = Index(meta.get('index'))\n elif isinstance(meta.get('index'), Index):\n ret['index'] = meta.get('index')\n\n ret['utc'] = True\n if 'utc' in meta and isinstance(meta.get('utc'), bool):\n ret['utc'] = meta.get('utc')\n\n return pmap(ret)", "def GetResourceMetadata(self):\n result = super().GetResourceMetadata()\n if self.created:\n result['container_cluster_version'] = self.k8s_version\n return result", "def getData(self, local_cache):", "def get_created_time(self, name):\n return self.cache.get(name).time", "def cache_file_metadata(self, filenames):\n file_metadata = {}\n for fn in filenames:\n metadata = parse(fn)\n metadata['fn'] = fn[:-4]\n file_metadata_summary = self.gen_file_metadata_summary(metadata)\n file_metadata[file_metadata_summary] = metadata\n return file_metadata", "def add_metadata(data):\n data[\"last_downloaded\"] = util.utc_now()\n return data", "def meta(self):\n return FLMetaDict(self.h5.attrs)", "def test_get_metadata_for_rate_plan(self):\n pass" ]
[ "0.5946828", "0.5924196", "0.5874222", "0.58641565", "0.58568573", "0.5794843", "0.57618964", "0.5748162", "0.5736788", "0.5663821", "0.5662813", "0.5655036", "0.5647174", "0.56243145", "0.55787057", "0.5567786", "0.55402327", "0.5527116", "0.55239946", "0.55070686", "0.54851043", "0.54838306", "0.54830235", "0.54795015", "0.5472423", "0.5455209", "0.5450143", "0.5450143", "0.5450143", "0.5450143", "0.54356635", "0.54206145", "0.54133505", "0.5400853", "0.5382491", "0.538029", "0.5374808", "0.53649366", "0.53581005", "0.5357816", "0.5336766", "0.5325964", "0.532552", "0.532248", "0.5308349", "0.5301624", "0.52980554", "0.5288952", "0.5285749", "0.52814114", "0.5281245", "0.52724624", "0.52505636", "0.52492934", "0.52422786", "0.5239389", "0.5238487", "0.5236282", "0.523353", "0.5232436", "0.52295", "0.52285504", "0.52285504", "0.5225359", "0.5216348", "0.5216049", "0.5204513", "0.5203065", "0.5203065", "0.5198547", "0.5192542", "0.51898456", "0.5188779", "0.5179114", "0.51728284", "0.51611215", "0.51516205", "0.5148921", "0.51486576", "0.5147971", "0.51476943", "0.5144023", "0.51432747", "0.51418424", "0.51406276", "0.51392037", "0.51327944", "0.5125138", "0.51245415", "0.5120967", "0.51163286", "0.5113968", "0.5112612", "0.5109874", "0.5109815", "0.51096696", "0.51043546", "0.51029843", "0.5102289", "0.510147" ]
0.6988418
0
Mock the current time for SAML, so we can replay canned requests/responses
def _freeze_time(self, timestamp): now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp) now_patch.start() self.addCleanup(now_patch.stop) # lint-amnesty, pylint: disable=no-member
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mocked_time():\n return datetime.datetime(2017, 10, 27, 22, 54, 56, 566179)", "def test_method(self):\n response = self.app.get('/')\n assert isinstance(response.raw['now'], datetime.datetime)", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def test_time(self):\n result = self.test_client.time\n\n assert result == \"15093049123\"", "def test_time(self):\r\n pass", "def test_get_timestamp(monkeypatch):\n\n mock_time = datetime.datetime(2000, 1, 1, hour=0, minute=0, second=0)\n\n class mockdatetime:\n @classmethod\n def now(self):\n return mock_time\n\n monkeypatch.setattr('grimagents.common.datetime', mockdatetime)\n\n timestamp = common.get_timestamp()\n assert timestamp == '2000-01-01_00-00-00'", "def test_details_time(self):\n self.assertLess(self.details.time, datetime.now(timezone.utc))", "def test_get_current_time_is_constant() -> None:\n time_provider = TimeProvider()\n current_time_1 = time_provider.get_current_time()\n current_time_2 = time_provider.get_current_time()\n\n assert current_time_1 == current_time_2", "def test_valid_access_token_time(self):\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300\n })\n\n content_type = 'application/x-www-form-urlencoded'\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(200, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']", "def test_timing_with_time_stub(self):\n saved_time = time.time\n try:\n time.time = lambda: -1\n reporter = xml_reporter._TextAndXMLTestResult(self.xml_stream,\n self.stream,\n 'foo', 0)\n test = MockTest('bar')\n reporter.startTest(test)\n self.assertNotEqual(reporter.start_time, -1)\n finally:\n time.time = saved_time", "def mock_time(desired_time):\n # type: (float) -> Any\n return mock.patch.object(time, 'time', return_value=desired_time)", "def test_reminder_time(self):\n node = self.create_xml_patient({'Reminder_Time': '12:00'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n self.assertEqual(payload.patients.count(), 1)\n patient = 
payload.patients.all()[0]\n self.assertEqual(patient.reminder_time, datetime.time(12, 0))", "def test_get_time(self):\n self.assertEqual(\n get_time(), time.strftime(const.TIMESTAMP_FORMAT, time.gmtime(time.time()))\n )", "def test_datetime(self, mock_url_read):\n mock_url_read.return_value = LAST_SCAN\n self.assertEqual(datetime.datetime(2017, 9, 20, 0, 43, 35), self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def test_local_timestamp(self, mock_datetime):\n format = '%Y-%m-%dT%H:%M:%S.%f%z'\n # set up mocks for assertions,\n # return a real datetime object for simplicity in testing\n mock_datetime.now = Mock(return_value=datetime.now())\n\n blk = AddTimestamp()\n config = {'utc': False}\n self.configure_block(blk, config)\n\n # process a signal\n blk.start()\n blk.process_signals([\n Signal(),\n ])\n blk.stop()\n\n # check calls\n mock_datetime.now.assert_called_once_with()\n mock_datetime.utcnow.assert_not_called()\n # check output\n self.validate_timestamps(format)\n self.assert_num_signals_notified(1)", "def test_with_now(self):\n self.assertEqual(ageid(self.now), 'age1')", "def strict_fake_time(now):\n from grpc_testing import _time\n return _time.StrictFakeTime(now)", "def setmocktime(self, timestamp: int) -> None:\n assert type(timestamp) == int\n return self.rpc_call(\"setmocktime\", timestamp)", "def test_issue_tracked_times(self):\n pass", "def test_idle_time_alt_time_zone(self):\n FakeDateTime.now = classmethod(lambda cls, tz: datetime(2021, 7, 3, 7, 0, 0, tzinfo=tz))\n # mock_datetime.now.return_value = datetime(2021, 7, 3, 7, 0, 0, tzinfo=MST)\n # mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)\n i = idle_time(tz='US/Pacific')\n self.assertEqual(i, -3600)\n FakeDateTime.now = classmethod(lambda cls, tz: datetime(2021, 7, 3, 11, 0, 0, tzinfo=tz))\n # mock_datetime.return_value = datetime(2021, 7, 3, 11, 0, 0, tzinfo=MST)\n i = idle_time(tz='US/Pacific')\n self.assertEqual(i, 3600)\n FakeDateTime.now = classmethod(lambda cls, tz: datetime(2021, 7, 3, 13, 0, 0, tzinfo=tz))\n # mock_datetime.return_value = datetime(2021, 7, 3, 13, 0, 0, tzinfo=MST)\n i = idle_time(tz='US/Pacific')\n self.assertEqual(i, -3600)", "def test_last_login(self, mocked):\n\n # Test it authenticates if Ona User is Logged In\n mocked.get(\n \"https://stage-api.ona.io/api/v1/user\",\n json=self.ona_response,\n )\n\n factory = APIRequestFactory()\n request = factory.get('/api/v1/user')\n\n previous_last_login = self.user.last_login\n\n returned_user = self.auth.authenticate_credentials(\n 'token', request)[0]\n\n self.assertEqual(self.user, returned_user)\n # check that the last_login was updated\n self.user.refresh_from_db()\n self.assertTrue(self.user.last_login > previous_last_login)", "def _Now():\n return datetime.datetime.utcnow()", "def test_get_current_time_is_in_utc() -> None:\n time_provider = TimeProvider()\n current_time = time_provider.get_current_time()\n\n assert current_time.as_datetime().timezone == UTC", "def test_now(bot, monkeypatch, bot_arg, update):\n print('all', bot.DB._execute('select * from sp_tokens'))\n chat_id = update.message.chat_id\n token = 'token'\n\n def save_token():\n time.sleep(1)\n database = Database()\n database.config(bot.DB._filename)\n database.save_sp_token(token, chat_id)\n\n access_token = {\n 'access_token': token + '2',\n 'expires_at': (time.time() + 100),\n 'refresh_token': token + '_refresh',\n }\n 
monkeypatch.setattr(bot.SP, 'get_access_token', lambda x: access_token)\n\n Thread(target=save_token).start()\n monkeypatch.setattr(bot.SP, 'currently_playing', lambda x: None)\n bot.now(bot_arg, update)\n assert bot_arg.msg_log[0] == 'Please open this link to log in to Spotify'\n assert bot_arg.msg_log[1] == bot.SP.get_auth_url(chat_id)\n assert bot_arg.msg_log[2] == 'There is nothing playing!'\n\n song = Song('Orphaned land', 'ornaments of gold')\n lyrics = 'The light of the dark is the morning of the dawn'\n monkeypatch.setattr(bot.SP, 'currently_playing', lambda x: song)\n monkeypatch.setattr(bot, 'get_lyrics', lambda x, y: lyrics)\n bot.now(bot_arg, update)\n assert bot_arg.msg_log[3] == lyrics", "def test_timestamp_noint(self, mock):\n mock.configure_mock(**(self.config_payload(True, False)))\n self.assertRaises(\n TypeError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()", "async def test_cached_unsilence_time(self, datetime_mock):\n now_timestamp = 100\n duration = 15\n timestamp = now_timestamp + duration * 60\n datetime_mock.now.return_value = datetime.fromtimestamp(now_timestamp, tz=UTC)\n\n ctx = MockContext(channel=self.text_channel)\n await self.cog.silence.callback(self.cog, ctx, duration)\n\n self.cog.unsilence_timestamps.set.assert_awaited_once_with(ctx.channel.id, timestamp)\n datetime_mock.now.assert_called_once_with(tz=UTC) # Ensure it's using an aware dt.", "def test_logging_timestamps(self):\n import datetime\n\n fmt = \"%Y-%m-%d-%H-%M-%S\"\n time = self.chatbot.timestamp(fmt)\n\n self.assertEqual(time, datetime.datetime.now().strftime(fmt))", "def test_expired_access_token_time(self):\n\n expired = datetime.datetime.now(pytz.utc) - datetime.timedelta(\n minutes=6)\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300,\n 'created_at': expired\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(401, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']", "def test_authenticated_session(self):\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session['foo'] = 'bar'\r\n session.save()\r\n self.assertEquals(0, outcookie['trac_session']['expires'])", "def now(self, request):\n identity = self.bot.get_plugin('identity').get_identity_by_request(request)\n\n now = times.now()\n tz = self._get_user_timezone(identity)\n local = times.to_local(now, tz)\n\n request.respond('Server time: {}\\nLocal time:{}'.format(now, local))", "def test_multiple_logins_have_same_started_at(self):\n 
# User A starts a survey\n self.launchSurvey(self.client_a, '1', '0112', roles=['dumper'])\n # And starts the questionnaire\n self.post(self.client_a, action='start_questionnaire')\n\n # We dump their submission\n a_submission = self.dumpSubmission(self.client_a)['submission']\n\n # User B loads the survey\n self.launchSurvey(self.client_b, '1', '0112', roles=['dumper'])\n # And we dump their submission\n b_submission = self.dumpSubmission(self.client_b)['submission']\n\n # Making sure that the started_at field is a datetime and that\n # it is the same for both users\n self.assertEqual(a_submission['started_at'], b_submission['started_at'])\n\n started_at_datetime = datetime.strptime(a_submission['started_at'], '%Y-%m-%dT%H:%M:%S.%f')\n\n self.assertIsNotNone(started_at_datetime)", "def test__API_increment_clock(self):\n self.mock_connection.state = MockConnection.CORRECT_NUM_OF_CONFIRMATIONS\n\n # simulate request\n other_time = 10\n self.mock_connection.simulate_request_from_other_process(other_time) # time bigger than ours\n # add 1 after receiving\n # add one more 1 after sending confirmation\n self.assertEqual(self.mutex.api.clock, other_time + 2)\n\n # now out time is equal to other_time + 2\n # let's receive 'release' message with older time than ours now:\n self.mock_connection.simulate_release_from_other_process(other_time + 1, 0)\n # our time is supposed to be equal to other_time + 3 now:\n # because we only increment it once when receive 'release' message\n self.assertEqual(self.mutex.api.clock, other_time + 3)", "def test_cleans_previous_token_before_fetching_new_one(self):\n new_token = deepcopy(self.token)\n past = time.time() - 7200\n now = time.time()\n self.token[\"expires_at\"] = past\n new_token[\"expires_at\"] = now + 3600\n url = \"https://example.com/token\"\n\n with mock.patch(\"time.time\", lambda: now):\n for client in self.clients:\n sess = OAuth2Session(client=client, token=self.token)\n sess.send = fake_token(new_token)\n if isinstance(client, LegacyApplicationClient):\n # this client requires a username+password\n # if unset, an error will be raised\n self.assertRaises(ValueError, sess.fetch_token, url)\n self.assertRaises(\n ValueError, sess.fetch_token, url, username=\"username1\"\n )\n self.assertRaises(\n ValueError, sess.fetch_token, url, password=\"password1\"\n )\n # otherwise it will pass\n self.assertEqual(\n sess.fetch_token(\n url, username=\"username1\", password=\"password1\"\n ),\n new_token,\n )\n else:\n self.assertEqual(sess.fetch_token(url), new_token)", "def test_last_used(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n monkeypatch.setenv('INACTIVITY_AGE', '10')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user3', 'kljin', 'Active', created, last_used)\n key.audit(10, 11, 2, 1)\n assert key.audit_state == 'expire'\n key.audit(60, 80, 2, 1)\n assert key.audit_state == 'stagnant_expire'", "def setUp(self):\n\n # Json response\n self.json_pass_times = {\n \"message\": \"success\",\n \"request\": {\n \"altitude\": 100,\n \"datetime\": 1481418788,\n \"latitude\": 15.0,\n \"longitude\": 20.0,\n \"passes\": 5\n },\n \"response\": [\n {\n \"duration\": 348,\n \"risetime\": 1481448840\n },\n {\n \"duration\": 634,\n \"risetime\": 1481454465\n },\n {\n \"duration\": 220,\n \"risetime\": 1481460482\n },\n {\n \"duration\": 224,\n \"risetime\": 1481484335\n },\n {\n \"duration\": 640,\n \"risetime\": 1481489937\n }\n 
]\n }\n\n self.location = self.json_pass_times['response']\n\n #HTTP Mock\n @all_requests\n def correct_response(url, request):\n headers = {'content-type': 'application/json',\n 'Set-Cookie': 'foo=bar;'}\n return response(200, self.json_pass_times, headers, None, 5,\n request)\n self.http_correct = correct_response\n\n @all_requests\n def wrong_response(url, request):\n headers = {'content-type': 'application/json',\n 'Set-Cookie': 'foo=bar;'}\n return response(403, self.json_pass_times, headers, None, 5,\n request)\n self.http_wrong = wrong_response\n\n self.iss = pyiss.ISS()", "def test_prepare_datetime(time):\n assert SSLLabsClient().prepare_datetime(time) == \"2018-03-17\"", "def _get_issued_at(include_issued_at):\n iat = None\n if include_issued_at:\n iat = time.time()\n return iat", "def test_timestamp_backward(self, mock):\n mock.configure_mock(**(self.config_payload(0, 1)))\n self.assertRaises(\n AssertionError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()", "def setUp(self):\n self.ol = OneloginAWS(\n _MockSection(\n base_uri=\"https://api.us.onelogin.com/\",\n client_id='mock-id',\n client_secret='mock-secret',\n aws_app_id='mock-app-id',\n subdomain='example',\n can_save_password=False,\n username='mock-username',\n duration_seconds=2600,\n auto_determine_ip_address=False,\n ),\n )\n\n self.ol.password = \"mock-password\"\n\n self.get_saml_assertion_mock = MagicMock(return_value=Namespace(\n mfa=Namespace(\n devices=[Namespace(type='mock1', id='mock-id-1'), ],\n state_token='mock-token'\n ),\n ))\n self.get_saml_assertion_verifying_mock = MagicMock(\n return_value='mock-saml-response'\n )\n self.ol.ol_client = Namespace(\n get_saml_assertion=self.get_saml_assertion_mock,\n get_saml_assertion_verifying=(\n self.get_saml_assertion_verifying_mock\n ),\n error=None,\n )", "def test_multiple_calls_to_sim_time_set_up(self, SimTimeService_mock):\n rospy.init_node('test_node', anonymous=True)\n SimTimeService_mock.return_value = None\n\n simtime_pub = SimTimePublisher()\n\n simtime_pub.set_up_simulation_time()\n with self.assertRaises(RuntimeError):\n simtime_pub.set_up_simulation_time()", "def fake_time(cls, ignored):\n cls.FAKE_TIME += 2\n return cls.FAKE_TIME", "def test_time_ss(self):\n result = self.test_client.ss\n\n assert result is None", "def test_refresh_associations_time(self, log_mock, mock_time):\n uuid = uuids.compute_node\n # Seed the provider tree so _refresh_associations finds the provider\n self.client._provider_tree.new_root('compute', uuid, generation=1)\n\n # Called a first time because association_refresh_time is empty.\n now = mock_time.return_value\n self.client._refresh_associations(self.context, uuid)\n self.assert_getters_were_called(uuid)\n log_mock.assert_has_calls([\n mock.call('Refreshing inventories for resource provider %s', uuid),\n mock.call('Updating ProviderTree inventory for provider %s from '\n '_refresh_and_get_inventory using data: %s',\n uuid, self.inv),\n mock.call('Refreshing aggregate associations for resource '\n 'provider %s, aggregates: %s', uuid, uuids.agg1),\n mock.call('Refreshing trait associations for resource '\n 'provider %s, traits: %s', uuid, 'CUSTOM_GOLD')\n ])\n\n # Clear call count.\n self.reset_getter_mocks()\n\n # Not called a second time because not enough time has passed.\n mock_time.return_value = (now +\n CONF.compute.resource_provider_association_refresh / 2)\n self.client._refresh_associations(self.context, uuid)\n 
self.assert_getters_not_called(timer_entry=uuid)\n\n # Called because time has passed.\n mock_time.return_value = (now +\n CONF.compute.resource_provider_association_refresh + 1)\n self.client._refresh_associations(self.context, uuid)\n self.assert_getters_were_called(uuid)", "def test_timestamp_minus(self, mock):\n mock.configure_mock(**(self.config_payload(-1, -2)))\n self.assertRaises(\n AssertionError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()", "def test_time_status(self):\n result = self.test_client.time_status\n\n assert result == \"12312\"", "def test_issue_reset_time(self):\n pass", "async def test_setup_timestamp(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK, json={\"key\": \"2021-11-11 11:39Z\"}\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.key }}\",\n \"device_class\": SensorDeviceClass.TIMESTAMP,\n }\n },\n )\n await async_setup_component(hass, \"homeassistant\", {})\n\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n\n state = hass.states.get(\"sensor.rest_sensor\")\n assert state.state == \"2021-11-11T11:39:00+00:00\"\n assert state.attributes[ATTR_DEVICE_CLASS] == SensorDeviceClass.TIMESTAMP\n assert \"sensor.rest_sensor rendered invalid timestamp\" not in caplog.text\n assert \"sensor.rest_sensor rendered timestamp without timezone\" not in caplog.text\n\n # Bad response: Not a timestamp\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK, json={\"key\": \"invalid time stamp\"}\n )\n await hass.services.async_call(\n \"homeassistant\",\n \"update_entity\",\n {ATTR_ENTITY_ID: [\"sensor.rest_sensor\"]},\n blocking=True,\n )\n state = hass.states.get(\"sensor.rest_sensor\")\n assert state.state == \"unknown\"\n assert state.attributes[ATTR_DEVICE_CLASS] == SensorDeviceClass.TIMESTAMP\n assert \"sensor.rest_sensor rendered invalid timestamp\" in caplog.text\n\n # Bad response: No timezone\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK, json={\"key\": \"2021-10-11 11:39\"}\n )\n await hass.services.async_call(\n \"homeassistant\",\n \"update_entity\",\n {ATTR_ENTITY_ID: [\"sensor.rest_sensor\"]},\n blocking=True,\n )\n state = hass.states.get(\"sensor.rest_sensor\")\n assert state.state == \"unknown\"\n assert state.attributes[ATTR_DEVICE_CLASS] == SensorDeviceClass.TIMESTAMP\n assert \"sensor.rest_sensor rendered timestamp without timezone\" in caplog.text", "def get_time():\n return datetime.datetime.now()", "def test_time_request_message(self):\n expected_topic = self.factory.common_topic + WAPMF.TIME\n expected_payload = None\n expected_message = Message(expected_topic, expected_payload)\n\n serialized_message = self.factory.make_time_request()\n\n self.assertEqual(expected_message, serialized_message)", "def test_expires_soon(self):\n now = timezone.now()\n window = SparkSettings().RENEW_TOKEN_WINDOW\n cur = self.factory.build(access_token='good',\n expires_at=now + timedelta(seconds=window*2))\n exp = self.factory.build(access_token='expired',\n expires_at=now + timedelta(seconds=window/2))\n self.assertFalse(cur.expires_soon())\n self.assertTrue(exp.expires_soon())", "def test_timeout(self):\n # Uses a mocked version of 
EmailActivationTokenGenerator\n # so we can change the value of 'today'\n class Mocked(EmailActivationTokenGenerator):\n def __init__(self, today):\n self._today_val = today\n\n def _today(self):\n return self._today_val\n\n user = self.create_user()\n token_generator = EmailActivationTokenGenerator()\n token = token_generator.make_token(user)\n\n p1 = Mocked(date.today() + timedelta(settings.USERS_EMAIL_CONFIRMATION_TIMEOUT_DAYS))\n self.assertTrue(p1.check_token(user, token))\n\n p2 = Mocked(date.today() + timedelta(settings.USERS_EMAIL_CONFIRMATION_TIMEOUT_DAYS + 1))\n self.assertFalse(p2.check_token(user, token))", "def _test_id(self):\n #Force the session timeout to always update with the site's preferences.\n new_timeout = self.timeout\n Slate.__init__(\n self\n , self.session_cookie # Use the cookie name to isolate session data\n , self.originalid\n , timeout=new_timeout\n )\n if self.is_expired():\n # If we're expired, we want a new id to prevent session fixation.\n Slate.__init__(self, self.session_cookie, None, timeout=new_timeout)\n log('Session {0} expired -> {1}'.format(self.originalid, self.id))", "def test_time_field():", "def test_make_request_method(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL, \r\n mut.METHOD_KEY: SAMPLE_METHOD})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.post.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.post.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def test_missing_sleep_at_end(self, STS_init_mock):\n rospy.init_node('test_node', anonymous=True)\n\n # mock __init__ of SimTimeService\n STS_init_mock.return_value = None\n\n simtime_pub = SimTimePublisher()\n simtime_pub.set_up_simulation_time()\n\n # mock ClockService instance variables\n mck = Mock(realtime_factor=simtime_pub._SimTime.realtime_factor,\n step_size=simtime_pub._SimTime.step_size)\n simtime_pub.ClockService = mck\n\n simtime_pub.update_simulation_time()\n\n with self.assertRaises(RuntimeError):\n simtime_pub.update_simulation_time()", "def actual_time():\n return _time.time()", "def test_is_traveling(self):\n travelcalculator = TravelCalculator(25, 50)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n assert not travelcalculator.is_traveling()\n assert travelcalculator.position_reached()\n\n travelcalculator.set_position(80)\n assert not travelcalculator.is_traveling()\n assert travelcalculator.position_reached()\n\n mock_time.return_value = 1580000000.0\n travelcalculator.start_travel_down()\n\n mock_time.return_value = 1580000004.0\n assert travelcalculator.is_traveling()\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000005.0\n assert not travelcalculator.is_traveling()\n assert travelcalculator.position_reached()", "def setUp(self):\n self.now = 123456789\n def fakeTime():\n return self.now\n self.patch(ls, 'time', fakeTime)\n\n # Make sure that the timezone ends up the same after these tests as\n # it was before.\n if 'TZ' in os.environ:\n self.addCleanup(operator.setitem, os.environ, 'TZ', os.environ['TZ'])\n self.addCleanup(time.tzset)\n else:\n def cleanup():\n # os.environ.pop is broken! Don't use it! Ever! 
Or die!\n try:\n del os.environ['TZ']\n except KeyError:\n pass\n time.tzset()\n self.addCleanup(cleanup)", "def test_issue_add_time(self):\n pass", "def setup_system_xmodule_mocks_for_lti20_request_test(self):\r\n self.system.get_real_user = Mock(return_value=self.USER_STANDIN)\r\n self.xmodule.max_score = Mock(return_value=1.0)\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', u'test_client_secret'))\r\n self.xmodule.verify_oauth_body_sign = Mock()", "def _now():\n return datetime.datetime.utcnow().replace(tzinfo=pytz.utc)", "def test_time_lapse(self):\n t0 = time.time()\n time.sleep(2)\n lap = time_lapse(t0)\n self.assertEqual(lap, '00:00:02')", "def test_create_proxy(self):\n\n with patch('hgw_common.models.OAuth2Session', new_callable=MockOAuth2Session) as mock:\n m = mock(200)\n OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)\n # The datetime object has a precision to 10e-6 seconds while the timestamp 10e-7.\n # This precision is irrelevant in this case but we need to modify the original value\n m.token['expires_at'] = datetime.fromtimestamp(m.token['expires_at']).timestamp()\n mock.assert_called()\n self.assertEqual(AccessToken.objects.count(), 1)\n self.assertDictEqual(AccessToken.objects.first().to_python(), mock().token)", "def test_make_request(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.get.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.get.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def test_last_access(self):\n self.assertIsInstance(self.obj.last_access, datetime)", "def setUpClass(cls):\n now = timezone.now()\n cls.expired_dt = now + timedelta(days=-10)\n cls.current_dt = now + timedelta(days=90)", "def test_is_expired_time_based(self):\n expired_dt = datetime.now() + timedelta(hours=-1)\n good_dt = datetime.now() + timedelta(hours=1)\n expired_pass = DoorPassFactory.create(device=self.device, expires_at=expired_dt)\n good_pass = DoorPassFactory.create(device=self.device, expires_at=good_dt)\n self.assertTrue(expired_pass.is_expired())\n self.assertFalse(good_pass.is_expired())", "def test_info_message(self):\n utcmock = MagicMock()\n utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))\n with patch(\"datetime.datetime\", utcmock):\n msg = \"SIMM crosstalk during tectonic stress\"\n out = saltsupport.LogCollector()\n out.info(msg)\n assert saltsupport.LogCollector.INFO in out.messages\n assert (\n type(out.messages[saltsupport.LogCollector.INFO])\n == saltsupport.LogCollector.MessagesList\n )\n assert out.messages[saltsupport.LogCollector.INFO] == [\n \"00:00:00.000 - {}\".format(msg)\n ]", "def test_timestamp_added(self):\n response = CreateArticle().create_article()\n self.assertIsNotNone(response.created_at)", "def test_get_game_time_on_ice(self):\n msg = \"Response status is not 200\"\n response = self.api.get_game_time_on_ice(self.game_id)\n self.assertEqual(response.status_code, 200, msg)", "def assertion_callback(_request, _uri, headers):\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')", "def 
test_latest_total_response_times_pruned(self):\n s = StatsEntry(self.stats, \"/\", \"GET\", use_response_times_cache=True)\n t = int(time.time())\n for i in reversed(range(2, 30)):\n s.response_times_cache[t - i] = CachedResponseTimes(\n response_times={}, num_requests=0\n )\n self.assertEqual(29, len(s.response_times_cache))\n s.log(17, 1337)\n s.last_request_timestamp -= 1\n s.log(1, 1)\n self.assertEqual(20, len(s.response_times_cache))\n self.assertEqual(\n CachedResponseTimes(response_times={17: 1}, num_requests=1),\n s.response_times_cache.popitem(last=True)[1],\n )", "def time_now():\n return time.time()", "def testExpirationTime(self):\n\n bye = \"Good bye!\"\n memcache.add('bye', bye, 1)\n assert memcache.get('bye') == bye\n time.sleep(2)\n assert memcache.get('bye') == None", "def test_legacy_client_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def test_timestamp_is_not_available():\n mock_message.timestamp = Mock(return_value=(0, 0))\n new_message = Message(mock_message)\n\n assert new_message.value == mock_message.value()\n assert new_message._raw == mock_message\n assert new_message._meta.key == mock_message.key()\n assert new_message._meta.partition == mock_message.partition()\n assert new_message._meta.offset == mock_message.offset()\n assert new_message._meta.topic == mock_message.topic()\n assert new_message._meta.timestamp is None\n assert new_message._meta.datetime is None", "def test_sleep_request(self):\n date = datetime.now() - timedelta(minutes=14)\n RequestAPI.objects.create(total_request=450, date=date)\n start = time.time()\n ManagerRequestApiTwitter().handle_rate_limit()\n stop = time.time()\n total_time = stop - start\n self.assertGreater(total_time, 60)", "def test_get_datetime(client):\n response = client.get('/post/1.0')\n assert response.status_code == 200\n assert response.json['posted_at'] is not None", "def setUp(self):\n SimTimePublisher._sim_time_setup_requested = False", "def setUp(self):\n super().setUp()\n\n # Mock the call to the SAP SuccessFactors assertion endpoint\n SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp'\n\n def assertion_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')\n\n httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)\n\n SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp'\n\n def bad_callback(_request, _uri, headers):\n \"\"\"\n Return a 404 error when someone tries to call the URL.\n \"\"\"\n return (404, headers, 'NOT AN ASSERTION')\n\n httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback)\n\n # Mock the call to the SAP SuccessFactors token endpoint\n SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'\n\n def token_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'assertion=fake_saml_assertion' in _request.body\n assert b'company_id=NCC1701D' in 
_request.body\n assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, '{\"access_token\": \"faketoken\"}')\n\n httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)\n\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'email': '[email protected]',\n 'country': 'Australia',\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)", "def now():\r\n return datetime.datetime.now()", "def test_travel_full_up(self):\n travelcalculator = TravelCalculator(25, 50)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(30)\n travelcalculator.start_travel_up()\n\n mock_time.return_value = 1580000014.0\n assert not travelcalculator.position_reached()\n assert not travelcalculator.is_closed()\n assert not travelcalculator.is_open()\n\n mock_time.return_value = 1580000015.0\n assert travelcalculator.position_reached()\n assert travelcalculator.is_open()\n assert not travelcalculator.is_closed()", "def test_timestamp_and_datetime_extraction():\n test_datetime = datetime.datetime(2017, 1, 15)\n test_timestamp = (test_datetime - datetime.datetime(1970, 1, 1)).total_seconds() * 1000.0\n\n # valid timestamp in the form kafka would send it if SET\n mock_message.timestamp = Mock(return_value=(1, test_timestamp))\n\n timestamp = extract_timestamp_from_message(mock_message)\n assert timestamp == test_timestamp\n assert kafka_timestamp_to_datetime(timestamp) == test_datetime\n\n # valid timestamp in the form kafka would send it if NOT SET\n mock_message.timestamp = Mock(return_value=(1, -1))\n\n timestamp = extract_timestamp_from_message(mock_message)\n assert timestamp is None\n assert kafka_timestamp_to_datetime(timestamp) is None\n\n # no timestamp in the form kafka would send it if NOT AVAILABLE\n mock_message.timestamp = Mock(return_value=(0, 0))\n\n timestamp = extract_timestamp_from_message(mock_message)\n assert timestamp is None\n assert kafka_timestamp_to_datetime(timestamp) is None", "def parse_time_to_SAML(time):\n data = datetime.utcfromtimestamp(float(time))\n return data.strftime('%Y-%m-%dT%H:%M:%SZ')", "def test_default_config(self, mock_datetime):\n format = '%Y-%m-%dT%H:%M:%S.%fZ'\n # set up mocks for assertions,\n # return a real datetime object for simplicity in testing\n mock_datetime.utcnow = Mock(return_value=datetime.utcnow())\n\n blk = AddTimestamp()\n config = {}\n self.configure_block(blk, config)\n\n # process a list of signals\n blk.start()\n blk.process_signals([\n Signal({'foo': 'bar'}),\n Signal({'foo': 'baz'}),\n ])\n blk.stop()\n\n # check calls\n mock_datetime.now.assert_not_called()\n mock_datetime.utcnow.assert_called_once_with()\n # check output, enriched by default\n self.validate_timestamps(format)\n self.assert_last_signal_list_notified([\n Signal({\n 'foo': 'bar',\n 'timestamp': ANY,\n }),\n Signal({\n 
'foo': 'baz',\n 'timestamp': ANY,\n }),\n ])", "def _get_now():\n return datetime.now(tz=timezone.utc)", "def test_response_has_auth_token_and_sid(self, request_post):\n gae_req = AppEngineRequest(url=\"/foo\", appid=\"test\", source=\"test\",\n email=\"[email protected]\", password=\"foobar\")\n\n request_post.return_value = Mock(text=\"Auth=my_token\\nSID=my_sid\")\n\n token = gae_req.get_auth_token()\n\n self.assertEqual(token, \"my_token\")\n self.assertEqual(gae_req.sid, \"my_sid\")\n\n data = {\n \"Email\": \"[email protected]\",\n \"Passwd\": \"foobar\",\n \"service\": \"ah\",\n \"source\": \"test\",\n \"accountType\": \"HOSTED_OR_GOOGLE\"\n }\n\n request_post.assert_called_once_with(\n \"https://www.google.com/accounts/ClientLogin\", data=data)", "async def test_process_set_custom_time(self):\n xknx = XKNX()\n self.datetime = DateTime(\n xknx,\n \"TestDateTime\",\n group_address=\"1/2/3\",\n broadcast_type=\"TIME\",\n localtime=False,\n )\n assert self.datetime.remote_value.value is None\n\n test_time = time.strptime(\"9:13:14\", \"%H:%M:%S\")\n await self.datetime.set(test_time)\n telegram = xknx.telegrams.get_nowait()\n assert telegram == Telegram(\n destination_address=GroupAddress(\"1/2/3\"),\n payload=GroupValueWrite(DPTArray((0x9, 0xD, 0xE))),\n )\n await self.datetime.process(telegram)\n assert self.datetime.remote_value.value == test_time", "def test_state(self, mock_req):\n self.setup_api(MOCK_DATA, mock_req)\n now = datetime(1970, month=1, day=1)\n with patch(\"homeassistant.util.dt.now\", return_value=now):\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n self.fake_delay(2)\n sensor.update()\n if name == google_wifi.ATTR_LAST_RESTART:\n assert \"1969-12-31 00:00:00\" == sensor.state\n elif name == google_wifi.ATTR_UPTIME:\n assert 1 == sensor.state\n elif name == google_wifi.ATTR_STATUS:\n assert \"Online\" == sensor.state\n else:\n assert \"initial\" == sensor.state", "def test_clear_last_started_date(self):\n saver = ReportStatsDBAccessor(\"myreport\", self.manifest_id)\n saver.log_last_started_datetime()\n self.assertIsNotNone(saver.get_last_started_datetime())\n saver.clear_last_started_datetime()\n self.assertIsNone(saver.get_last_started_datetime())", "def test_run_now(curent_time,state):\n date = datetime(2020,5,5,12,0)\n duration_in_minutes = 65\n run = Run(date, duration_in_minutes/60)\n\n assert run.run_now(curent_time) == state", "def remember_identity(response, request, identity):\n raise NotImplementedError # pragma: nocoverage", "def time_now():\n return datetime.datetime.now().time()", "def test_send_notification_end_membership(self):\n\n fixed_time = timezone.now()\n\n end_time_membership = fixed_time + relativedelta(days=28)\n\n self.user.membership = self.membership\n self.user.membership_end = end_time_membership\n self.user.save()\n\n with mock.patch(\n 'store.serializers.timezone.now',\n return_value=fixed_time\n ):\n response = self.client.get(\n reverse('user-execute-automatic-email-membership-end')\n )\n\n content = {\n 'stop': False,\n 'email_send_count': 1\n }\n\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n response.content\n )\n\n self.assertEqual(\n json.loads(response.content),\n content\n )\n\n self.assertEqual(len(mail.outbox), 1)\n\n self.user.refresh_from_db()\n self.assertEqual(self.user.membership_end_notification, fixed_time)\n\n with mock.patch(\n 'store.serializers.timezone.now',\n return_value=fixed_time\n ):\n response = self.client.get(\n 
reverse('user-execute-automatic-email-membership-end')\n )\n content = {\n 'stop': False,\n 'email_send_count': 0\n }\n\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n response.content\n )\n\n self.assertEqual(\n json.loads(response.content),\n content\n )\n\n # no new mail\n self.assertEqual(len(mail.outbox), 1)", "def get_time(self):\n return datetime.datetime.now(self.time_zone)", "async def test_validate_session(api_client: TestClient, coresys: CoreSys):\n with patch(\"aiohttp.web_request.BaseRequest.__getitem__\", return_value=None):\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": \"non-existing\"},\n )\n assert resp.status == 401\n\n with patch(\n \"aiohttp.web_request.BaseRequest.__getitem__\",\n return_value=coresys.homeassistant,\n ):\n resp = await api_client.post(\"/ingress/session\")\n result = await resp.json()\n\n assert \"session\" in result[\"data\"]\n session = result[\"data\"][\"session\"]\n assert session in coresys.ingress.sessions\n\n valid_time = coresys.ingress.sessions[session]\n\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": session},\n )\n assert resp.status == 200\n assert await resp.json() == {\"result\": \"ok\", \"data\": {}}\n\n assert coresys.ingress.sessions[session] > valid_time", "def test_expires(self):\n # We aren't bother going to test the actual time in expires, that\n # way lies pain with broken tests later.\n up = self.get(self.good_data)\n hdrs = dict(up.get_headers(1))\n lm = datetime(*utils.parsedate_tz(hdrs['Last-Modified'])[:7])\n exp = datetime(*utils.parsedate_tz(hdrs['Expires'])[:7])\n assert (exp - lm).seconds == 3600", "def test_get(self, mock_logging):\r\n twitter = client.ScreenUserClient(mock_logger)\r\n twitter.client.statuses.user_timeline = MagicMock()\r\n with pytest.raises(InternalServerError) as e:\r\n twitter.get(\"python\", 3)\r\n\r\n mock_logging.assert_called_with(\"Get User TimeLine, name = python\")\r\n assert twitter.client.statuses.user_timeline.called", "def test_emotion_history_route_has_date_on_page(self):\n self.client.login(username='dan', password='password')\n now = timezone.now()\n response = self.client.get(reverse_lazy('emotion_history',\n kwargs={\n 'year': now.year,\n 'month': now.month,\n 'day': now.day\n }))\n self.assertIn(b'Records for', response.content)", "def pytest_timeout_set_timer(item, settings):" ]
[ "0.6736465", "0.6140573", "0.60731506", "0.60279644", "0.59742117", "0.5914771", "0.5881534", "0.5857517", "0.5802075", "0.57887524", "0.57644403", "0.5713352", "0.56917495", "0.5675409", "0.56512123", "0.5646837", "0.56387556", "0.5580659", "0.557747", "0.557065", "0.5564709", "0.5506594", "0.5500785", "0.5499147", "0.5496223", "0.5495353", "0.5487346", "0.5478385", "0.5454392", "0.5453732", "0.54512393", "0.54290164", "0.5402303", "0.5398189", "0.5389583", "0.5369773", "0.5364567", "0.53642726", "0.5356263", "0.5347775", "0.53447646", "0.53391516", "0.5333994", "0.53240585", "0.5304179", "0.53035617", "0.52946573", "0.5294019", "0.5274343", "0.5267734", "0.52621686", "0.5260528", "0.5249113", "0.52397513", "0.52352625", "0.52302104", "0.5229232", "0.52254194", "0.5218686", "0.52153295", "0.52041227", "0.52011013", "0.51973796", "0.5193567", "0.5187566", "0.5187071", "0.51824707", "0.51771307", "0.51768124", "0.51735735", "0.5165592", "0.51614386", "0.51595694", "0.5144579", "0.51419044", "0.5140997", "0.51405454", "0.5139728", "0.5137972", "0.5131139", "0.51298296", "0.51253855", "0.5123402", "0.51203156", "0.5113384", "0.5112351", "0.5109635", "0.51071864", "0.5105854", "0.5101334", "0.50840557", "0.50818604", "0.5079611", "0.5078895", "0.50749797", "0.5073705", "0.50712276", "0.507082", "0.50676626", "0.50673425" ]
0.586782
7
Enable and configure the TestShib SAML IdP as a third_party_auth provider
def _configure_testshib_provider(self, **kwargs): fetch_metadata = kwargs.pop('fetch_metadata', True) assert_metadata_updates = kwargs.pop('assert_metadata_updates', True) kwargs.setdefault('name', self.PROVIDER_NAME) kwargs.setdefault('enabled', True) kwargs.setdefault('visible', True) kwargs.setdefault("backend_name", "tpa-saml") kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG) kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID) kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL) kwargs.setdefault('icon_class', 'fa-university') kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName kwargs.setdefault('max_session_length', None) kwargs.setdefault('send_to_registration_first', False) kwargs.setdefault('skip_email_verification', False) saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member if fetch_metadata: assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata() if assert_metadata_updates: assert num_total == 1 # lint-amnesty, pylint: disable=no-member assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member assert num_updated == 1 # lint-amnesty, pylint: disable=no-member assert num_failed == 0 # lint-amnesty, pylint: disable=no-member assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member return saml_provider
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)", "def init_saml_auth(saml_prepared_flask_request):\n return OneLogin_Saml2_Auth(saml_prepared_flask_request, custom_base_path=app.config.get('SAML_PATH', None))", "def init_saml_auth(req):\n auth = OneLogin_Saml2_Auth(req, custom_base_path=app.config[\"SAML_PATH\"])\n return auth", "def __init__(self, base_url):\n module_base = \"%s/%s\" % (base_url, Saml2BackendPlugin.provider)\n sp_config = {\n \"entityid\": \"%s/proxy_sp.xml\" % module_base,\n \"service\": {\n \"sp\": {\n \"allow_unsolicited\": True,\n \"endpoints\": {\n \"assertion_consumer_service\": [\n (\"%s/acs/post\" % module_base, BINDING_HTTP_POST),\n (\"%s/acs/redirect\" % module_base, BINDING_HTTP_REDIRECT)\n ],\n }\n }\n },\n \"key_file\": TestConfiguration.get_instance().backend_key.name,\n \"cert_file\": TestConfiguration.get_instance().backend_cert.name,\n \"metadata\": {\n \"local\": TestConfiguration.get_instance().fake_idp_metadata,\n },\n\n \"xmlsec_binary\": TestConfiguration.get_instance().xmlsec_path,\n }\n config = {\"config\": sp_config,\n \"idp_entity_id\": \"https://example.com/unittest_idp.xml\",\n \"state_id\": \"saml_backend_test_id\"\n }\n\n super(Saml2BackendPlugin, self).__init__(SamlBackend, Saml2BackendPlugin.provider, config)", "def add_tomcat7_idp():\n pass", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def setup_provider(self):\n pass", "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def get_saml_auth(flask_request):\n return OneLogin_Saml2_Auth(prepare_flask_request_for_saml(flask_request), custom_base_path=app.config.get('SAML_PATH', None))", "def __init__(self, base_url):\n idpconfig = {\n \"entityid\": \"{}/proxy.xml\".format(base_url),\n \"service\": {\n \"idp\": {\n \"endpoints\": {\n \"single_sign_on_service\": [(\"%s/%s/sso/redirect\" %\n (base_url, Saml2BackendPlugin.provider),\n BINDING_HTTP_REDIRECT),\n (\"%s/%s/sso/post\" %\n (base_url, Saml2BackendPlugin.provider),\n BINDING_HTTP_POST)]\n },\n },\n },\n \"key_file\": TestConfiguration.get_instance().frontend_key.name,\n \"cert_file\": TestConfiguration.get_instance().frontend_cert.name,\n \"metadata\": {\n \"local\": TestConfiguration.get_instance().fake_sp_metadata,\n },\n \"xmlsec_binary\": TestConfiguration.get_instance().xmlsec_path,\n }\n\n config = {\"idp_config\": idpconfig,\n \"endpoints\": Saml2FrontendPlugin.endpoints,\n \"base\": base_url,\n \"state_id\": \"saml_frontend_state_id\"}\n\n super(Saml2FrontendPlugin, self).__init__(SamlFrontend, \"Saml2IDP\", config)", "def setup(cls, transport_config):\n 
cls.we_are_initiator = transport_config.weAreClient\n\n # Check for shared-secret in the server transport options.\n transport_options = transport_config.getServerTransportOptions()\n if transport_options and \"shared-secret\" in transport_options:\n log.debug(\"Setting shared-secret from server transport options: '%s'\", transport_options[\"shared-secret\"])\n cls.shared_secret = transport_options[\"shared-secret\"]", "def enable_sso(DirectoryId=None, UserName=None, Password=None):\n pass", "def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"", "def test_externalauth_login_required_course_context(self):\r\n TARGET_URL = reverse('courseware', args=[self.course.id.to_deprecated_string()]) # pylint: disable=C0103\r\n noshib_response = self.client.get(TARGET_URL, follow=True)\r\n self.assertEqual(noshib_response.redirect_chain[-1],\r\n ('http://testserver/accounts/login?next={url}'.format(url=TARGET_URL), 302))\r\n self.assertContains(noshib_response, (\"Log into your {platform_name} Account | {platform_name}\"\r\n .format(platform_name=settings.PLATFORM_NAME)))\r\n self.assertEqual(noshib_response.status_code, 200)\r\n\r\n TARGET_URL_SHIB = reverse('courseware', args=[self.shib_course.id.to_deprecated_string()]) # pylint: disable=C0103\r\n shib_response = self.client.get(**{'path': TARGET_URL_SHIB,\r\n 'follow': True,\r\n 'REMOTE_USER': self.extauth.external_id,\r\n 'Shib-Identity-Provider': 'https://idp.stanford.edu/'})\r\n # Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain\r\n # The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we\r\n # won't test its contents\r\n self.assertEqual(shib_response.redirect_chain[-3],\r\n ('http://testserver/shib-login/?next={url}'.format(url=TARGET_URL_SHIB), 302))\r\n self.assertEqual(shib_response.redirect_chain[-2],\r\n ('http://testserver{url}'.format(url=TARGET_URL_SHIB), 302))\r\n self.assertEqual(shib_response.status_code, 200)", "def includeme(config):\n # authentication\n auth_secret = os.environ.get('AUTH_SECRET', '')\n auth_policy = AuthTktAuthenticationPolicy(\n secret=auth_secret,\n hashalg='sha512'\n )\n config.set_authentication_policy(auth_policy)\n # authorization\n authz_policy = ACLAuthorizationPolicy()\n config.set_authorization_policy(authz_policy)\n config.set_root_factory(MyRoot)\n\n session_secret = os.environ.get('SESSION_SECRET', '')\n session_factory = SignedCookieSessionFactory(session_secret)\n config.set_session_factory(session_factory)\n config.set_default_csrf_options(require_csrf=True)", "def includeme(config):\n # Grab the pyramid-wide settings, to look for any auth config.\n settings = config.get_settings().copy()\n # Use the settings to construct an AuthenticationPolicy.\n authn_policy = SRPAuthenticationPolicy.from_settings(settings)\n 
config.set_authentication_policy(authn_policy)\n # Hook up a default AuthorizationPolicy.\n # You can't have one without the other, and ACLAuthorizationPolicy is\n # usually what you want. If the app configures one explicitly then this\n # will get overridden.\n authz_policy = ACLAuthorizationPolicy()\n config.set_authorization_policy(authz_policy)\n # Add forbidden view to challenge for auth credentials.\n config.add_view(authn_policy.challenge_view,\n context=\"pyramid.exceptions.Forbidden\")", "def samladsv3(self):\n try:\n # Get the federated credentials from the user\n print(\"[-] Get authentication token\")\n print(\"Email:\", end=' ')\n username = input()\n password = getpass.getpass()\n print('')\n\n # Initiate session handler\n session = requests.Session()\n\n # Programmatically get the SAML assertion\n # Opens the initial IdP url and follows all of the HTTP302 redirects, and\n # gets the resulting login page\n formresponse = session.get(idpentryurl, verify=sslverification)\n # Capture the idpauthformsubmiturl, which is the final url after all the 302s\n idpauthformsubmiturl = formresponse.url\n\n # Parse the response and extract all the necessary values\n # in order to build a dictionary of all of the form values the IdP expects\n formsoup = BeautifulSoup(formresponse.text, \"html.parser\")\n payload = {}\n\n for inputtag in formsoup.find_all(re.compile('(INPUT|input)')):\n name = inputtag.get('name','')\n value = inputtag.get('value','')\n if \"user\" in name.lower():\n #Make an educated guess that this is the right field for the username\n payload[name] = username\n elif \"email\" in name.lower():\n #Some IdPs also label the username field as 'email'\n payload[name] = username\n elif \"pass\" in name.lower():\n #Make an educated guess that this is the right field for the password\n payload[name] = password\n else:\n #Simply populate the parameter with the existing value (picks up hidden fields in the login form)\n payload[name] = value\n\n # Debug the parameter payload if needed\n # Use with caution since this will print sensitive output to the screen\n #print(payload)\n\n # Some IdPs don't explicitly set a form action, but if one is set we should\n # build the idpauthformsubmiturl by combining the scheme and hostname\n # from the entry url with the form action target\n # If the action tag doesn't exist, we just stick with the\n # idpauthformsubmiturl above\n for inputtag in formsoup.find_all(re.compile('(FORM|form)')):\n action = inputtag.get('action')\n loginid = inputtag.get('id')\n if (action and loginid == \"loginForm\"):\n parsedurl = urlparse(idpentryurl)\n idpauthformsubmiturl = parsedurl.scheme + \"://\" + parsedurl.netloc + action\n\n # Performs the submission of the IdP login form with the above post data\n response = session.post(\n idpauthformsubmiturl, data=payload, verify=sslverification)\n\n # Debug the response if needed\n #print(response.text)\n\n # Overwrite and delete the credential variables, just for safety\n username = '##############################################'\n password = '##############################################'\n del username\n del password\n\n # Decode the response and extract the SAML assertion\n soup = BeautifulSoup(response.text, \"html.parser\")\n assertion = ''\n\n # Look for the SAMLResponse attribute of the input tag (determined by\n # analyzing the debug print lines above)\n for inputtag in soup.find_all('input'):\n if(inputtag.get('name') == 'SAMLResponse'):\n #print(inputtag.get('value'))\n assertion = inputtag.get('value')\n\n 
# Better error handling is required for production use.\n if (assertion == ''):\n #TODO: Insert valid error checking/handling\n print('Response did not contain a valid SAML assertion')\n sys.exit(0)\n\n # Debug only\n #print(base64.b64decode(assertion))\n\n # Parse the returned assertion and extract the authorized roles\n awsroles = []\n root = ET.fromstring(base64.b64decode(assertion))\n for saml2attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):\n if (saml2attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role'):\n for saml2attributevalue in saml2attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):\n awsroles.append(saml2attributevalue.text)\n\n # Note the format of the attribute value should be role_arn,principal_arn\n # but lots of blogs list it as principal_arn,role_arn so let's reverse\n # them if needed\n for awsrole in awsroles:\n chunks = awsrole.split(',')\n if'saml-provider' in chunks[0]:\n newawsrole = chunks[1] + ',' + chunks[0]\n index = awsroles.index(awsrole)\n awsroles.insert(index, newawsrole)\n awsroles.remove(awsrole)\n\n # If I have more than one role, ask the user which one they want,\n # otherwise just proceed\n print(\"\")\n if len(awsroles) > 1:\n i = 0\n print(\"Please choose the role you would like to assume:\")\n for awsrole in awsroles:\n print('[', i, ']: ', awsrole.split(',')[0])\n i += 1\n print(\"Selection: \", end=' ')\n selectedroleindex = input()\n\n # Basic sanity check of input\n if int(selectedroleindex) > (len(awsroles) - 1):\n print('You selected an invalid role index, please try again')\n sys.exit(0)\n\n role_arn = awsroles[int(selectedroleindex)].split(',')[0]\n principal_arn = awsroles[int(selectedroleindex)].split(',')[1]\n else:\n role_arn = awsroles[0].split(',')[0]\n principal_arn = awsroles[0].split(',')[1]\n\n # Use the assertion to get an AWS STS token using Assume Role with SAML\n conn = boto3.client('sts', region_name=region)\n token = conn.assume_role_with_saml(RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=assertion)\n\n # Read in the existing config file\n config = configparser.RawConfigParser()\n config.read(credentials)\n\n # Put the credentials into a saml specific section instead of clobbering\n # the default credentials\n if not config.has_section('saml'):\n config.add_section('saml')\n\n config['saml']['output'] = outputformat\n config['saml']['region'] = region\n config['saml']['aws_access_key_id'] = token['Credentials']['AccessKeyId']\n config['saml']['aws_secret_access_key'] = token['Credentials']['SecretAccessKey']\n config['saml']['aws_session_token'] = token['Credentials']['SessionToken']\n\n # Write the updated config file\n with open(credentials, 'w+') as configfile:\n config.write(configfile)\n\n # Give the user some basic info as to what has just happened\n print('\\n\\n----------------------------------------------------------------')\n print('Your new access key pair has been stored in the AWS configuration file {0} under the saml profile.'.format(credentials))\n print('Note that it will expire at {0}.'.format(token['Credentials']['Expiration'].astimezone(get_localzone())))\n print('After this time, you may safely rerun this script to refresh your access key pair.')\n print('To use this credential, call the AWS CLI with the --profile option (e.g. 
aws --profile saml ec2 describe-instances).')\n print('----------------------------------------------------------------\\n\\n')\n\n return samladsv3\n\n except Exception as e:\n print(\"Error while getting authentication token. %s\" % e)", "def setup_auth_turing(cluster):\n # Read in auth info\n azure_file = os.path.join(ABSOLUTE_HERE, \"secrets\", \"turing-auth-key-prod.json\")\n with open(azure_file, \"r\") as stream:\n azure = json.load(stream)\n\n # Login in to Azure\n login_cmd = [\n \"az\", \"login\", \"--service-principal\",\n \"--username\", azure[\"sp-app-id\"],\n \"--password\", azure[\"sp-app-key\"],\n \"--tenant\", azure[\"tenant-id\"]\n ]\n subprocess.check_output(login_cmd)\n\n # Set kubeconfig\n creds_cmd = [\n \"az\", \"aks\", \"get-credentials\",\n \"--name\", cluster,\n \"--resource-group\", \"binder-prod\"\n\n ]\n stdout = subprocess.check_output(creds_cmd)\n print(stdout.decode('utf-8'))", "def test_open_id_setup(self):\r\n self.attempt_login(200)", "def request_app_setup(hass, config, add_devices, discovery_info=None):\n from requests.compat import urljoin\n from requests_oauthlib import OAuth2Session\n configurator = hass.components.configurator\n authorization_base_url = urljoin(BASE_URL, '/oauth/authorize')\n oauth = OAuth2Session(config[CONF_CLIENT_ID], redirect_uri=REDIRECT_URI, state=None)\n\n def trakt_configuration_callback(data):\n \"\"\"Run when the configuration callback is called.\"\"\"\n token_url = urljoin(BASE_URL, '/oauth/token')\n oauth.fetch_token(token_url, client_secret=config[CONF_CLIENT_SECRET], code=data.get('pin_code'))\n token = oauth.token['access_token']\n save_token(hass, token)\n continue_setup_platform(hass, config, token, add_devices, discovery_info)\n\n if 'trakt' not in _CONFIGURING:\n authorization_url, _ = oauth.authorization_url(authorization_base_url, username=config[CONF_USERNAME])\n\n _CONFIGURING['trakt'] = configurator.request_config(\n 'Trakt',\n trakt_configuration_callback,\n description=\"Enter pin code from Trakt: \" + authorization_url,\n submit_caption='Verify',\n fields=[{\n 'id': 'pin_code',\n 'name': \"Pin code\",\n 'type': 'string'}]\n )", "def test_auth0_config_anon(anontestapp, registry):\n _test_auth_config(anontestapp, registry)", "def setUp(self):\n super().setUp()\n\n # Mock the call to the SAP SuccessFactors assertion endpoint\n SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp'\n\n def assertion_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')\n\n httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)\n\n SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp'\n\n def bad_callback(_request, _uri, headers):\n \"\"\"\n Return a 404 error when someone tries to call the URL.\n \"\"\"\n return (404, headers, 'NOT AN ASSERTION')\n\n httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback)\n\n # Mock the call to the SAP SuccessFactors token endpoint\n SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'\n\n def token_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after 
checking that the input is what we expect.\n \"\"\"\n assert b'assertion=fake_saml_assertion' in _request.body\n assert b'company_id=NCC1701D' in _request.body\n assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, '{\"access_token\": \"faketoken\"}')\n\n httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)\n\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'email': '[email protected]',\n 'country': 'Australia',\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)", "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def provider(hass):\n provider = hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )\n hass.loop.run_until_complete(provider.async_initialize())\n return provider", "def on_identity_loaded(sender, identity):\n key = current_app.config.get(\n \"OAUTHCLIENT_CERN_OPENID_SESSION_KEY\",\n OAUTHCLIENT_CERN_OPENID_SESSION_KEY,\n )\n identity.provides.update(session.get(key, []))", "def post_setup(cls):\n super().post_setup()\n\n # The SENTRY_DSN setting should be available to activate sentry for an environment\n if cls.SENTRY_DSN is not None:\n sentry_sdk.init( # pylint: disable=abstract-class-instantiated\n dsn=cls.SENTRY_DSN,\n environment=cls._get_environment(),\n release=get_release(),\n integrations=[DjangoIntegration()],\n )\n with sentry_sdk.configure_scope() as scope:\n scope.set_extra(\"application\", \"backend\")", "def test_client_key_secret(self):\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['lti_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n self.xmodule.lti_id = \"lti_id\"\r\n key, secret = self.xmodule.get_client_key_secret()\r\n expected = ('test_client', 'test_secret')\r\n self.assertEqual(expected, (key, secret))", "def test_shib_login_enrollment(self):\r\n student = UserFactory.create()\r\n extauth = ExternalAuthMap(external_id='[email protected]',\r\n external_email='',\r\n external_domain='shib:https://idp.stanford.edu/',\r\n external_credentials=\"\",\r\n internal_password=\"password\",\r\n user=student)\r\n student.set_password(\"password\")\r\n student.save()\r\n extauth.save()\r\n\r\n course = CourseFactory.create(org='Stanford', number='123', display_name='Shib Only')\r\n course.enrollment_domain = 'shib:https://idp.stanford.edu/'\r\n self.store.update_item(course, '**replace_user**')\r\n\r\n # use django test client for sessions and url processing\r\n # no enrollment before trying\r\n self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))\r\n self.client.logout()\r\n request_kwargs = {'path': '/shib-login/',\r\n 'data': {'enrollment_action': 'enroll', 
'course_id': course.id.to_deprecated_string(), 'next': '/testredirect'},\r\n 'follow': False,\r\n 'REMOTE_USER': '[email protected]',\r\n 'Shib-Identity-Provider': 'https://idp.stanford.edu/'}\r\n response = self.client.get(**request_kwargs)\r\n # successful login is a redirect to \"/\"\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['location'], 'http://testserver/testredirect')\r\n # now there is enrollment\r\n self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))\r\n\r\n # Clean up and try again with POST (doesn't happen with real production shib, doing this for test coverage)\r\n self.client.logout()\r\n CourseEnrollment.unenroll(student, course.id)\r\n self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))\r\n\r\n response = self.client.post(**request_kwargs)\r\n # successful login is a redirect to \"/\"\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['location'], 'http://testserver/testredirect')\r\n # now there is enrollment\r\n self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))", "def setup_course_with_proctoring_backend(self, proctoring_provider, escalation_email):\n course = CourseFactory.create(enable_proctored_exams=True,\n enable_timed_exams=True,\n proctoring_provider=proctoring_provider,\n proctoring_escalation_email=escalation_email)\n self.setup_course_url(course)", "def test_client_key_secret(self):\n #this adds lti passports to system\n mocked_course = Mock(lti_passports=['lti_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n self.xmodule.lti_id = \"lti_id\"\n key, secret = self.xmodule.get_client_key_secret()\n expected = ('test_client', 'test_secret')\n assert expected == (key, secret)", "def svn_client_get_ssl_server_trust_prompt_provider(svn_auth_provider_object_t_provider, svn_auth_ssl_server_trust_prompt_func_t_prompt_func, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def init_auth_client(self):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n token = util.prompt_for_user_token(\n cfg['username'],\n scope=cfg['scope'],\n client_id=cfg['spotipy_client_id'],\n client_secret=cfg['spotipy_client_secret'],\n redirect_uri=cfg['spotipy_redirect_uri'])\n sp = spotipy.Spotify(auth=token)\n return sp, cfg['username']", "def set_auth_credentials():\n import os\n from passlib.apps import custom_app_context as pwd_context\n\n os.environ[\"AUTH_USERNAME\"] = \"testme\"\n os.environ[\"AUTH_PASSWORD\"] = pwd_context.hash(\"foobar\")", "def sso_saml_acs(request, idp_slug):\n # todo these are placeholders for the json dump below\n error_reason = None\n request_session_data = None\n saml_relay = None\n\n request_id = request.session.get('AuthNRequestID')\n processed_response = request.saml2_auth.process_response(request_id=request_id)\n errors = request.saml2_auth.get_errors()\n not_auth_warn = not request.saml2_auth.is_authenticated()\n\n if not errors:\n if 'AuthNRequestID' in request.session:\n del request.session['AuthNRequestID']\n\n store_saml_data_in_session(request)\n\n user = auth.authenticate(\n request=request,\n username=request.session['samlNameId'],\n idp_slug=idp_slug,\n is_handshake_successful=True,\n )\n\n # we add the messages to the django messages framework here since\n # that middleware was not available for SsoBackend\n if hasattr(request, 'sso_new_user_messages'):\n for 
success_message in request.sso_new_user_messages['success']:\n messages.success(request, success_message)\n for error_message in request.sso_new_user_messages['error']:\n messages.error(request, error_message)\n\n if user:\n auth.login(request, user)\n\n # activate new project if needed\n project_name = get_new_sso_user_project_name_from_session(request)\n if project_name:\n try:\n request_new_domain(request, project_name, is_new_user=True)\n except NameUnavailableException:\n # this should never happen, but in the off chance it does\n # we don't want to throw a 500 on this view\n messages.error(\n request,\n _(\"We were unable to create your requested project \"\n \"because the name was already taken.\"\n \"Please contact support.\")\n )\n\n clear_sso_registration_data_from_session(request)\n return redirect(\"homepage\")\n\n # todo for debugging purposes to dump into the response below\n request_session_data = {\n \"samlUserdata\": request.session['samlUserdata'],\n \"samlNameId\": request.session['samlNameId'],\n \"samlNameIdFormat\": request.session['samlNameIdFormat'],\n \"samlNameIdNameQualifier\": request.session['samlNameIdNameQualifier'],\n \"samlNameIdSPNameQualifier\": request.session['samlNameIdSPNameQualifier'],\n \"samlSessionIndex\": request.session['samlSessionIndex'],\n }\n\n else:\n error_reason = request.saml2_auth.get_last_error_reason()\n\n return HttpResponse(json.dumps({\n \"errors\": errors,\n \"error_reason\": error_reason,\n \"not_auth_warn\": not_auth_warn,\n \"request_id\": request_id,\n \"processed_response\": processed_response,\n \"saml_relay\": saml_relay,\n \"request_session_data\": request_session_data,\n \"login_error\": getattr(request, 'sso_login_error', None),\n }), 'text/json')", "def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')", "def sso_saml_login(request, idp_slug):\n login_url = request.saml2_auth.login()\n username = get_sso_username_from_session(request)\n if username:\n # verify that the stored user data actually the current IdP\n idp = IdentityProvider.get_active_identity_provider_by_username(username)\n if idp and idp.slug == idp_slug:\n # pre-populate username for Azure AD\n login_url = f'{login_url}&login_hint={username}'\n return HttpResponseRedirect(login_url)", "def configure_irida_galaxy_connection(self, galaxy_url):\n self.configure_tool('IRIDA', 'client_secret', self.REDIRECT_CLIENT_SECRET)\n self.configure_tool('Galaxy', 'galaxy_url', galaxy_url)", "def test_client_key_secret_not_provided(self):\r\n\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['test_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n #set another lti_id\r\n self.xmodule.lti_id = \"another_lti_id\"\r\n 
key_secret = self.xmodule.get_client_key_secret()\r\n expected = ('','')\r\n self.assertEqual(expected, key_secret)", "def _get_saml_client(domain):\n acs_url = domain + '/sso/acs/'\n time_slack = 0\n mdata = tempfile.NamedTemporaryFile()\n f = open(mdata.name, 'wb')\n f.write(_urllib.urlopen(\n settings.SAML2_AUTH['METADATA_AUTO_CONF_URL']).read()\n )\n f.close()\n saml_settings = {\n 'metadata': {\n 'local': [mdata.name],\n },\n 'entityid': acs_url,\n 'service': {\n 'sp': {\n 'endpoints': {\n 'assertion_consumer_service': [\n (acs_url, BINDING_HTTP_REDIRECT),\n (acs_url, BINDING_HTTP_POST)\n ],\n },\n 'allow_unsolicited': True,\n 'authn_requests_signed': False,\n 'logout_requests_signed': True,\n 'want_assertions_signed': True,\n 'want_response_signed': False,\n },\n },\n 'accepted_time_diff': time_slack,\n }\n\n spConfig = Saml2Config()\n spConfig.load(saml_settings)\n spConfig.allow_unknown_attributes = True\n saml_client = Saml2Client(config=spConfig)\n mdata.close()\n return saml_client", "def _setup_threat_intel_auth_subparser(subparsers):\n generate_subparser(\n subparsers,\n 'update-auth',\n description='Enable, disable, or configure the threat intel downloader function',\n subcommand=True\n )", "def set_up_login():\n\n bitool.app.testing = True\n bitool.app.config['TESTING'] = True\n bitool.app.login_manager.init_app(bitool.app)\n app = bitool.app.test_client()\n\n return app", "def test_add_trusted_project(self):\n pass", "def svn_client_get_simple_provider(svn_auth_provider_object_t_provider, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def test_client_key_secret_not_provided(self):\n\n # this adds lti passports to system\n mocked_course = Mock(lti_passports=['test_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n # set another lti_id\n self.xmodule.lti_id = \"another_lti_id\"\n key_secret = self.xmodule.get_client_key_secret()\n expected = ('', '')\n assert expected == key_secret", "def setUp(self):\r\n super(CLITestAuthKeystoneWithId, self).setUp()\r\n self.client = client.HTTPClient(user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def set_credentials():", "def test_read_env_config3(config, environment_vars_set):\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"goood\"", "def test_add_trusted_project3(self):\n pass", "def configure_aaa_local_auth(device):\n try:\n device.configure([\n \"aaa authentication dot1x default local\",\n \"aaa local authentication default authorization default\",\n \"aaa authorization network default local\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure AAA local auth'\n )", "def update_identity_provider(module, sdk, cloud, idp):\n\n description = module.params.get('description')\n enabled = module.params.get('enabled')\n domain_id = module.params.get('domain_id')\n remote_ids = module.params.get('remote_ids')\n\n attributes = {}\n\n if (description is not None) and (description != idp.description):\n attributes['description'] = description\n if (enabled is not None) and (enabled != idp.is_enabled):\n attributes['enabled'] = enabled\n if (domain_id is not None) and (domain_id != idp.domain_id):\n attributes['domain_id'] = domain_id\n if (remote_ids is not None) and (remote_ids != idp.remote_ids):\n attributes['remote_ids'] = 
remote_ids\n\n if not attributes:\n return False, idp\n\n if module.check_mode:\n return True, None\n\n try:\n new_idp = cloud.identity.update_identity_provider(idp, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to update identity provider: {0}'.format(str(ex)))\n return (True, new_idp)", "def configure_ext_login(app):\n lm.init_app(app)\n\n @lm.user_loader\n def load_user(userid):\n \"\"\"\n Needed for flask-login.\n \"\"\"\n return models.User.query.get(int(userid))\n\n @app.before_request\n def set_g_user():\n g.user = current_user", "def get_sp_auth_url(self, session, sp_id, **kwargs):\n return None", "def test_add_trusted_project2(self):\n pass", "def configure_auth(self, auth_type, ha_type):\n yield self.configure_kerberos(auth_type, ha_type)\n self.configure_radius(auth_type)", "def test_add_trusted_project1(self):\n pass", "def register_auth_opts(conf, group, service_type=None):\n ks_loading.register_session_conf_options(conf, group)\n ks_loading.register_auth_conf_options(conf, group)\n CONF.set_default('auth_type', default='password', group=group)\n ks_loading.register_adapter_conf_options(conf, group)\n conf.set_default('valid_interfaces', DEFAULT_VALID_INTERFACES, group=group)\n if service_type:\n conf.set_default('service_type', service_type, group=group)\n else:\n types = os_service_types.get_service_types()\n key = 'ironic-inspector' if group == 'inspector' else group\n service_types = types.service_types_by_project.get(key)\n if service_types:\n conf.set_default('service_type', service_types[0], group=group)", "def test_add_trusted_project4(self):\n pass", "def svn_client_get_ssl_server_trust_file_provider(svn_auth_provider_object_t_provider, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def session_setup(opts: Dict[Any, Any]) -> Any: #TODO\n stype = ''\n if 'serverca' in opts and 'cert' in opts:\n stype = 'ssl'\n s = session.get(stype, **opts)\n if s is None:\n raise errors.KojiError('Unable to idenify authentication type.')\n s.login()\n if not s.is_ok():\n raise errors.AuthError('Unable to validate session')\n return s", "def configure_https():\n # need to write all to ensure changes to the entire request pipeline\n # propagate (c-api, haprxy, apache)\n CONFIGS.write_all()\n if 'https' in CONFIGS.complete_contexts():\n cmd = ['a2ensite', 'openstack_https_frontend']\n subprocess.check_call(cmd)\n else:\n cmd = ['a2dissite', 'openstack_https_frontend']\n subprocess.check_call(cmd)\n\n # TODO: improve this by checking if local CN certs are available\n # first then checking reload status (see LP #1433114).\n service_reload('apache2', restart_on_failure=True)\n\n for rid in relation_ids('identity-service'):\n identity_joined(rid=rid)", "def test_add_trusted_project7(self):\n pass", "def setUp(self):\r\n super(CLITestAuthKeystoneWithIdandName, self).setUp()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def assert_first_party_auth_trumps_third_party_auth(self, email=None, password=None, success=None):\r\n _, strategy = self.get_request_and_strategy(\r\n auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')\r\n strategy.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))\r\n self.create_user_models_for_existing_account(\r\n strategy, email, password, self.get_username(), 
skip_social_auth=True)\r\n\r\n strategy.request.POST = dict(strategy.request.POST)\r\n\r\n if email:\r\n strategy.request.POST['email'] = email\r\n if password:\r\n strategy.request.POST['password'] = 'bad_' + password if success is False else password\r\n\r\n self.assert_pipeline_running(strategy.request)\r\n payload = json.loads(student_views.login_user(strategy.request).content)\r\n\r\n if success is None:\r\n # Request malformed -- just one of email/password given.\r\n self.assertFalse(payload.get('success'))\r\n self.assertIn('There was an error receiving your login information', payload.get('value'))\r\n elif success:\r\n # Request well-formed and credentials good.\r\n self.assertTrue(payload.get('success'))\r\n else:\r\n # Request well-formed but credentials bad.\r\n self.assertFalse(payload.get('success'))\r\n self.assertIn('incorrect', payload.get('value'))", "def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n self.endpoint_url = f\"https://westus.{mf.FACE_API_URL}\"", "def initiateAuthentication(identity_url, return_to=None):", "def test_proctortrack_provider_with_email(self):\n self.setup_course_with_proctoring_backend('proctortrack', '[email protected]')\n\n self.instructor.is_staff = True\n self.instructor.save()\n self._assert_escalation_email_available(True)", "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def config_setup(self, config):\n super(PushGatewayApiV1TestCase, self).config_setup(config)\n config[\"apps\"][\"com.example.spqr\"] = {\n \"type\": \"tests.test_pushgateway_api_v1.TestPushkin\"\n }", "def __init__(self, requestor, client_id, client_secret, redirect_uri=None):\n super(TrustedAuthenticator, self).__init__(requestor, client_id,\n redirect_uri)\n self.client_secret = client_secret", "def setup_class(self):\n self.endpoint = VERSION_PREFIX + '/auth/login'\n self.test_client = create_app().test_client()", "def install(self, provider):\n pass # pragma: no cover", "def __init__(__self__, *,\n authorization_strategy: pulumi.Input['FhirDatastoreIdentityProviderConfigurationAuthorizationStrategy'],\n fine_grained_authorization_enabled: Optional[pulumi.Input[bool]] = None,\n idp_lambda_arn: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"authorization_strategy\", authorization_strategy)\n if fine_grained_authorization_enabled is not None:\n pulumi.set(__self__, \"fine_grained_authorization_enabled\", fine_grained_authorization_enabled)\n if idp_lambda_arn is not None:\n pulumi.set(__self__, \"idp_lambda_arn\", 
idp_lambda_arn)\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)", "def init():\n ok = not g.unitTesting\n if ok:\n g.plugin_signon(__name__)\n return ok", "def test_auth_test(self):\n backend = LdapBackend()\n backend.authenticate(None, username=\"apple\", password=\"ffffff\")", "def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_add_trusted_project5(self):\n pass", "def test_register_http_failure(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def configure_aaa_session_id(device, type):\n try:\n device.configure([\n f\"aaa session-id {type}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure AAA session ID'\n )", "def test_add_trusted_project6(self):\n pass", "def setup_module():\n pytest.test_user = fake_user.FakeUser()", "def setUp(self):\n application.config['TESTING'] = True\n application.config['WTF_CSRF_ENABLED'] = False\n application.config['DEBUG'] = False\n self.app = application.test_client()\n # setup plaid client\n self.client = Client(\n ENV_VARS[\"PLAID_CLIENT_ID\"],\n ENV_VARS[\"PLAID_SECRET\"],\n ENV_VARS[\"PLAID_PUBLIC_KEY\"],\n \"sandbox\"\n )\n self.public_token = sandbox.PublicToken(self.client)\n db.drop_all()\n db.create_all()", "def __init__(__self__, *,\n auth_type: pulumi.Input[str],\n client_id: pulumi.Input[str],\n principal_id: pulumi.Input[str],\n secret: pulumi.Input[str]):\n pulumi.set(__self__, \"auth_type\", 'servicePrincipalSecret')\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"principal_id\", principal_id)\n pulumi.set(__self__, \"secret\", secret)", "def sign_in(user_email, user_id):\n ourTestbed.setup_env(\n user_email=user_email,\n user_id=str(user_id),\n user_is_admin='0', # This was for GAE user admin, we use AppUser.\n overwrite=True)", "def test_replace_o_auth_client(self):\n pass", "def init() -> None:\n # Setup elib_config\n elib_config.ELIBConfig.setup(\n app_version=__version__,\n app_name='ESST',\n config_file_path='esst.toml',\n config_sep_str='__',\n )\n\n # Write example config file\n elib_config.write_example_config('esst.toml.example')\n\n # Validate config\n try:\n elib_config.validate_config()\n except elib_config.ConfigMissingValueError as error:\n LOGGER.error('missing mandatory config value: %s', error.value_name)\n LOGGER.error('please read \"esst.toml.example\" for 
instructions on how to setup the configuration for ESST')\n sys.exit(1)\n\n for config in SentryConfigContext.__subclasses__():\n SENTRY.register_context(context_name=config.__name__, context_provider=config)", "def test_auth_client_instantiated():\n client = ConfigureClients()\n assert client.auth_client", "def setup(provider):\n try:\n logger.info(f\"Setting up {provider.__name__}_{os.getenv('step')}\")\n return getattr(provider(os.getenv('host')),\n f'setup_{os.getenv(\"step\", \"first\")}_step')()\n except Exception as err:\n logger.exception(f\"Could not configure {provider}: {err}\")\n raise", "def create_identity_provider(module, sdk, cloud, name):\n\n if module.check_mode:\n return True, None\n\n description = module.params.get('description')\n enabled = module.params.get('enabled')\n domain_id = module.params.get('domain_id')\n remote_ids = module.params.get('remote_ids')\n\n if enabled is None:\n enabled = True\n if remote_ids is None:\n remote_ids = []\n\n attributes = {\n 'domain_id': domain_id,\n 'enabled': enabled,\n 'remote_ids': remote_ids,\n }\n if description is not None:\n attributes['description'] = description\n\n try:\n idp = cloud.identity.create_identity_provider(id=name, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to create identity provider: {0}'.format(str(ex)))\n return (True, idp)", "def auth_token_provider_relaxed_ssl(self, auth_token_provider_relaxed_ssl):\n\n self._auth_token_provider_relaxed_ssl = auth_token_provider_relaxed_ssl", "def auth_active(hass):\n hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )", "def add_virtual_authenticator(self, config):\n pass", "def enable_aaa_authentication_login(device,auth_list,auth_db1,auth_db2=None):\n\n cmd = f'aaa authentication login {auth_list} {auth_db1}'\n if auth_db2:\n cmd += f' {auth_db2}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa authentication login:\\n{e}'\n )", "def setUp(self):\n self.new_credentials = Credentials(\"Facebook\",\"Josphato\",\"jose!!otieno@45\")", "def setUp(self):\n self.ol = OneloginAWS(\n _MockSection(\n base_uri=\"https://api.us.onelogin.com/\",\n client_id='mock-id',\n client_secret='mock-secret',\n aws_app_id='mock-app-id',\n subdomain='example',\n can_save_password=False,\n username='mock-username',\n duration_seconds=2600,\n auto_determine_ip_address=False,\n ),\n )\n\n self.ol.password = \"mock-password\"\n\n self.get_saml_assertion_mock = MagicMock(return_value=Namespace(\n mfa=Namespace(\n devices=[Namespace(type='mock1', id='mock-id-1'), ],\n state_token='mock-token'\n ),\n ))\n self.get_saml_assertion_verifying_mock = MagicMock(\n return_value='mock-saml-response'\n )\n self.ol.ol_client = Namespace(\n get_saml_assertion=self.get_saml_assertion_mock,\n get_saml_assertion_verifying=(\n self.get_saml_assertion_verifying_mock\n ),\n error=None,\n )", "def _load_providers(self, **kwargs):\n return super()._load_providers(providers=\"TIProviders\", **kwargs)", "def _enable(cls, provider):\r\n if provider.NAME in cls._ENABLED:\r\n raise ValueError('Provider %s already enabled' % provider.NAME)\r\n cls._ENABLED[provider.NAME] = provider", "def test_auth0_config_admin(testapp, registry):\n _test_auth_config(testapp, registry)", "def assertion_callback(_request, _uri, headers):\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert 
b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')", "def install_tomcat7_idp():\n print(sys._getframe().f_code.co_name)\n _install_packages([\n 'tomcat7', \n 'libmysql-java', \n 'libjstl1.1-java'\n ])\n \n # RENDER AND PUT idp.xml template into Catalina\n idpxml = _j2_env.get_template('tomcat7/idp.xml').render(\n idp_path = IDP_INSTALL_PATH,\n )\n _safe_put( \n StringIO(idpxml), \n TOMCAT_INST_PATH+'/Catalina/localhost/idp.xml'\n )\n \n _safe_put( \n TEMPLATE_DIR+'/tomcat7/server.xml', \n TOMCAT_INST_PATH+'/server.xml'\n )\n \n _safe_put( \n TEMPLATE_DIR+'/tomcat7/tomcat7', \n '/etc/default/tomcat7'\n )\n \n # installs addictional JARS\n _download_file( JSTL_DL_URL, TOMCAT_JAR_PATH, )\n \n commands = [\n #~ 'systemctl tomcat7 enable',\n 'update-rc.d tomcat7 enable',\n 'service tomcat7 restart'\n ]\n \n _run_safe_commands(commands)\n run('ln -sf /usr/share/java/mysql.jar /usr/share/tomcat7/lib/mysql.jar')" ]
[ "0.67878383", "0.63547695", "0.63465583", "0.61199355", "0.59225214", "0.58825934", "0.56425726", "0.563755", "0.562901", "0.5535802", "0.5476775", "0.53583604", "0.531819", "0.5295527", "0.52504843", "0.51937705", "0.5125788", "0.50825155", "0.5081821", "0.5010863", "0.49874368", "0.497723", "0.49631655", "0.495106", "0.4943688", "0.49377185", "0.49259707", "0.49202064", "0.48945984", "0.48763403", "0.4851705", "0.48495936", "0.48491895", "0.48425806", "0.4842183", "0.48410052", "0.48401615", "0.48274457", "0.48150727", "0.48108736", "0.4806917", "0.47917086", "0.4782673", "0.4778927", "0.47665408", "0.47602665", "0.4746173", "0.4742457", "0.47307882", "0.47275802", "0.47267336", "0.47196847", "0.47185338", "0.47157046", "0.471258", "0.47066835", "0.47040984", "0.46997902", "0.46986637", "0.46928975", "0.4688851", "0.46870512", "0.46817997", "0.46746692", "0.46645582", "0.46508792", "0.46481526", "0.46448463", "0.46305457", "0.46268693", "0.4625582", "0.46193218", "0.46172458", "0.46164563", "0.46149004", "0.45949072", "0.45940736", "0.4591577", "0.45850375", "0.45661008", "0.4565482", "0.45627666", "0.4561809", "0.456077", "0.45588943", "0.45569566", "0.45554662", "0.45545626", "0.45522034", "0.45509878", "0.45507798", "0.45488256", "0.45345807", "0.45270503", "0.452627", "0.45026338", "0.44867426", "0.44823113", "0.44797346", "0.4477505" ]
0.7070957
0
Gets dict (string > object) of merged data about the user.
def get_response_data(self):
    response_data = dict(self.TOKEN_RESPONSE_DATA)
    response_data.update(self.USER_RESPONSE_DATA)
    return response_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_user_data(self):\n return {\"key\": self._key}", "def get_dictionary(self):\n data = {\n \"user_first_name\": self.user.first_name,\n \"user_last_name\": self.user.last_name,\n }\n dct = provider.Provider.get_dictionary(self)\n dct.update(data)\n return dct", "def json(self):\n result = {}\n for user in self.users:\n result[user.user_id] = user.json\n return result", "def user_data(self):\n itemuser = self.data['user']\n my_user_dict = {'user_id': itemuser['id'], 'user_name': itemuser['name'],\n 'user_handle': itemuser['screen_name'], 'user_desc': itemuser['description'],\n 'twitter_birthday': itemuser['created_at'], 'user_location': itemuser['location'],\n 'followers': itemuser['followers_count'], 'favorites': itemuser['favourites_count'],\n 'statuses': itemuser['statuses_count']}\n return my_user_dict", "def get_user_data(self):\n return self.user_data", "def get_user_data(self):\n return self.user_data", "def user_dict(self):\n return {\n \"user_id\": self.user_id,\n \"firstname\": self.firstname,\n \"lastname\": self.lastname,\n \"othernames\": self.othernames,\n \"username\": self.username,\n \"email\": self.email,\n \"phonenumber\": self.phonenumber,\n \"is_admin\": self.is_admin,\n \"password\": self.password,\n \"registered_on\": self.registered_on\n }", "def user_data(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n '_id' : self._id\n }", "def rootuser_info(self, datadict):\n\n dict1 = OrderedDict()\n dict1 = datadict['entry_data']['ProfilePage'][0]['graphql']['user']\n\n userdict = OrderedDict()\n keylist = ['id', 'username', 'full_name', 'biography', 'edge_follow', 'edge_followed_by', 'is_private', 'external_url', 'profile_pic_url_hd']\n\n for key in keylist:\n if key is 'edge_follow':\n userdict['following'] = dict1[key]\n elif key is 'edge_followed_by':\n userdict['followers'] = dict1[key]\n else:\n userdict[key] = dict1[key]\n\n userdict['platform'] = datadict['platform']\n\n return (json.dumps(userdict, indent=4))", "def get_merged_data(self):\n return self._combinedata", "def get_users(self):\n return {key: value.user for key, value in self}", "def get_user_info_by_id(self, user_id: int) -> dict:", "def to_dict(self):\n return self._user_data", "def user_data(self, access_token, *args, **kwargs):\n fields_selectors = ','.join(set(['id', 'first-name', 'last-name'] +\n self.setting('FIELD_SELECTORS', [])))\n # use set() over fields_selectors since LinkedIn fails when values are\n # duplicated\n url = 'https://api.linkedin.com/v1/people/~:(%s)' % fields_selectors\n raw_xml = self.oauth_request(access_token, url).content\n try:\n return to_dict(ElementTree.fromstring(raw_xml))\n except (ExpatError, KeyError, IndexError):\n return None", "def user_data(self):\n return self._user_data", "def GetUserData(self):\r\n\r\n return self.user_data", "def get_users_info(): \n \n data = user_obj.get_users_info()\n return data", "def user2dict(self):\n d = {}\n d['username'] = self.username\n d['level'] = self.level\n d['name'] = self.name\n d['email'] = self.email\n d['creation'] = self.creation\n d['update'] = self.update\n d['nsentences'] = self.nsentences\n d['nsessions'] = self.nsessions\n d['score'] = self.score\n d['pw_hash'] = self.pw_hash\n return d", "def serialize(self):\r\n return {\r\n \"user_id\": self.id,\r\n \"username\": self.username,\r\n \"image\": self.user_img,\r\n }", "def getdat(user):\r\n profile = user.profile\r\n return [user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]", 
"def get_data(self, user_id: str) -> dict:\n data = {\n 'id': str(user_id),\n 'first_name': '',\n 'last_name': '',\n 'fullname': '',\n 'email': '',\n 'internal': False,\n }\n try:\n _ = UUID(user_id) # noQA\n except (ValueError, AttributeError):\n logger.error(f'Actor id is not a valid UUID: {user_id}')\n else:\n if user_id == SystemUser.id:\n raw_data = SystemUser\n data['first_name'] = raw_data.first_name\n data['last_name'] = raw_data.last_name\n data['fullname'] = raw_data.title\n data['email'] = raw_data.email\n data['internal'] = raw_data.internal\n\n return data", "def getCurrentUserData(self):\r\n userDict = {}\r\n for c in range(self.view.userTable.columnCount()):\r\n colName = self.view.userTable.horizontalHeaderItem(c).text()\r\n userDict[colName] = self.view.userTable.item(self.view.userTable.currentRow(), c).text()\r\n \r\n return userDict", "def _collect_user_data(utt_sets: Collection[Collection[Utterance]]) -> Tuple[Dict[str, Dict[Hashable, str]], Dict[str, Dict[Hashable, bool]]]:\n # Collect USER data and metadata\n # all_users_data = defaultdict(lambda: defaultdict(set))\n all_users_meta = defaultdict(lambda: defaultdict(str))\n all_users_meta_conflict = defaultdict(lambda: defaultdict(bool))\n for utt_set in utt_sets:\n for utt in utt_set:\n for meta_key, meta_val in utt.user.meta.items():\n curr = all_users_meta[utt.user][meta_key]\n if curr != meta_val:\n if curr != \"\":\n all_users_meta_conflict[utt.user][meta_key] = True\n all_users_meta[utt.user][meta_key] = meta_val\n\n return all_users_meta, all_users_meta_conflict", "def merge_user_information(self, sid):\n pprint(self.extracted_information)\n for (field, value) in self.extracted_information.items():\n value = value[0] # TODO: should set data for everything in list but will do later\n self.data.set_data(sid, field, value[0])", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def json(self):\n\n this_user_detail = dict(\n arn=self.arn,\n create_date=self.create_date,\n id=self.user_id,\n inline_policies=self.inline_policies_json,\n inline_policies_count=len(self.inline_policies_json),\n # groups=self.groups,\n groups=self.groups_json,\n path=self.path,\n managed_policies_count=len(self.attached_managed_policies),\n managed_policies=self.attached_managed_policies_pointer_json,\n risks=self.consolidated_risks\n )\n return this_user_detail", "def extract_common_fields(self, data):\n member = data.get('member', {})\n return {'username': member.get('name'), 'email': member.get('email')}", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'owner' : self.user.name,\n }", "def get_response_data(self):\r\n response_data = dict(self.TOKEN_RESPONSE_DATA)\r\n response_data.update(self.USER_RESPONSE_DATA)\r\n return response_data", "def data(self, user=None):\n return {\n \"provider\": self.BACKEND,\n \"access_token\": self.access_token,\n \"client_id\": self.client_id,\n \"honor_code\": \"true\",\n \"country\": \"US\",\n \"username\": user.username if user else \"test_username\",\n \"name\": user.first_name if user else \"test name\",\n \"email\": user.email if user else \"[email protected]\"\n }", "def serialize(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'joinedDate': self.joinedDate\n }", "def 
extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def get_user_data(current_user):\n current_data = current_user.data\n current_data['score'] = current_user.get_score()\n del current_data['email']\n del current_data['sso_id']\n del current_data['is_admin']\n return current_data", "def _get_userinfo(self):\n if not hasattr(self, \"_userinfo\"):\n self._userinfo = {\n \"name\" : self.user_name,\n \"email\" : self.user_email\n }\n if self.user_id:\n u = self.user\n if u.email:\n self._userinfo[\"email\"] = u.email\n\n # If the user has a full name, use that for the user name.\n # However, a given user_name overrides the raw user.username,\n # so only use that if this review has no associated name.\n if u.get_full_name():\n self._userinfo[\"name\"] = self.user.get_full_name()\n elif not self.user_name:\n self._userinfo[\"name\"] = u.username\n return self._userinfo", "def get(self):\n return self.context.as_dict(self.user)", "def get_logging_values(self):\n values = {'user_name': self.user_name}\n values.update(self.to_dict())\n return values", "def users(self):\n return json.loads(self._cache.get(self._key))", "def extract_user_gql(data):\n return {\n \"pk\": int(data[\"id\"]),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data[\"is_private\"],\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n \"media_count\": data[\"edge_owner_to_timeline_media\"][\"count\"],\n \"follower_count\": data[\"edge_followed_by\"][\"count\"],\n \"following_count\": data[\"edge_follow\"][\"count\"],\n \"biography\": data[\"biography\"],\n \"external_url\": data[\"external_url\"],\n \"is_business\": data[\"is_business_account\"],\n }", "def get_payload(self) -> json:\n data = self.fetch_user_from_db()\n if data.shape[0] == 0:\n return json.dumps({self.user_id: \"No records found!\"})\n else:\n return json.dumps(\n {str(self.user): data[[\"similar\", \"score\"]].to_dict(orient=\"records\")}\n )", "def _build_person_data(request):\n if hasattr(request, 'rollbar_person'):\n rollbar_person_prop = request.rollbar_person\n person = rollbar_person_prop() if callable(rollbar_person_prop) else rollbar_person_prop\n if person and isinstance(person, dict):\n return person\n else:\n return None\n\n if StarletteRequest:\n from rollbar.contrib.starlette.requests import hasuser\n else:\n def hasuser(request): return True\n\n if hasuser(request) and hasattr(request, 'user'):\n user_prop = request.user\n user = user_prop() if callable(user_prop) else user_prop\n if not user:\n return None\n elif isinstance(user, dict):\n return user\n else:\n retval = {}\n if getattr(user, 'id', None):\n retval['id'] = str(user.id)\n elif getattr(user, 'user_id', None):\n retval['id'] = str(user.user_id)\n\n # id is required, so only include username/email if we have an id\n if retval.get('id'):\n username = getattr(user, 'username', None)\n email = getattr(user, 'email', None)\n retval.update({\n 'username': username,\n 'email': email\n })\n return retval\n\n if hasattr(request, 'user_id'):\n user_id_prop = request.user_id\n user_id = user_id_prop() if callable(user_id_prop) else user_id_prop\n if not user_id:\n return None\n return {'id': str(user_id)}", "def 
json(self):\n return {\n 'id': self.id,\n 'email': self.email,\n 'username': self.username\n }", "def getUsersData(self):\n users_data = self.get_api_results(\n \"/api/user/list?fields=id,username,name,phone,email,url,about,role,joined,lastactive,avatar,company,position,location&api_key={0}&format=json\")\n for user in users_data:\n if user[\"name\"] not in self.users:\n self.add_user(user)\n else:\n self.merge_user(user)\n return users_data", "def getUserInfosFromLoadedUsers(self,loaded_users,date_type):\n user_infos={}\n def addToUserInfo(loaded_user):\n user_infos[str(loaded_user.getUserID())]=loaded_user.getUserInfo(date_type) #python xmlrpc required keys not to be integers\n \n map(addToUserInfo,loaded_users)\n return user_infos", "def as_dict(self):\n return {\n 'interface_id': self.interface.id,\n 'tag': self.user_tag.as_dict() if self.user_tag else None\n }", "def user_ret():\n user_list = []\n all_objs = storage.all(\"User\")\n for obj in all_objs.values():\n user_list.append(obj.to_dict())\n return jsonify(user_list)", "def get_user_data(prs, client_id, client_secret):\n users = {}\n for owner, repo, number, pr in prs:\n username = pr.username\n\n # Initialize the User if needed\n if username not in users:\n print(pr.user_url, file=sys.stderr)\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret\n }\n resp = requests.get(pr.user_url, params=payload)\n\n # Abort if the return is an error\n out = resp.json()\n if 'message' in out:\n pprint.pprint(out, file=sys.stderr)\n raise Exception(resp.text)\n\n user = User(out)\n users[username] = user\n\n users[username].add_pr(pr)\n\n return users", "def serialize(self):\n return {\n \"first_name\" : self.first_name.capitalize(),\n \"last_name\" : self.last_name.capitalize(),\n \"name\" : self.first_name.capitalize() + ' ' + self.last_name.capitalize(),\n \"user_id\" : self.id,\n }", "def getInfo(self):\n request = self._connection.get('bookmarklet')\n userdata = self._userinfo_regex.search(request.text)\n if userdata is None: userdata = self._userinfo_regex_2.search(request.text)\n if userdata is None: raise errors.DiaspyError('cannot find user data')\n userdata = userdata.group(1)\n return json.loads(userdata)", "def serialize(self):\n return {\n 'id' : self.id,\n 'username' : self.username,\n 'email' : self.email\n }", "def get_user(self, username):\n return {}", "def __get_user(self, login):\n\n user = {}\n\n if not login:\n return user\n\n user_raw = self.client.get_user(login)\n user = json.loads(user_raw)\n self._push_cache_queue(user_raw)\n user_orgs_raw = \\\n self.client.get_user_orgs(login)\n user['organizations'] = json.loads(user_orgs_raw)\n self._push_cache_queue(user_orgs_raw)\n self._flush_cache_queue()\n\n return user", "def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'name': self.name,\n\t\t\t'user': self.user_id\n\t\t}", "def extract_user_short(data):\n user_pk = data.get(\"id\", data.get(\"pk\"))\n assert user_pk, 'User without pk \"%s\"' % data\n return {\n \"pk\": int(user_pk),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data.get(\"is_private\"),\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n # \"is_unpublished\": data.get(\"is_unpublished\"),\n }", "def enrich(r):\n row = r[1]\n userReference = row[0]\n user = row[1]\n # if there's no user, it means there wasn't match between the users in the db vs the ones from the vendor coming from the api\n if user and userReference:\n 
user[\"userId\"] = userReference.userId\n user[\"userActive\"] = userReference.active\n user[\"userLastActiveDate\"] = userReference.lastActiveDate\n return user", "def user_data(self, access_token, *args, **kwargs):\n response = self.request(\n \"https://openapi.naver.com/v1/nid/me\",\n headers={\n \"Authorization\": f\"Bearer {access_token}\",\n \"Content_Type\": \"text/json\",\n },\n )\n\n data = response.json()\n\n return {\n \"id\": self._fetch(data, \"id\"),\n \"email\": self._fetch(data, \"email\"),\n \"username\": self._fetch(data, \"name\"),\n \"nickname\": self._fetch(data, \"nickname\"),\n \"gender\": self._fetch(data, \"gender\"),\n \"age\": self._fetch(data, \"age\"),\n \"birthday\": self._fetch(data, \"birthday\"),\n \"profile_image\": self._fetch(data, \"profile_image\"),\n }", "def get_user_info(self):\n user_info = self.data_source.get_user_info(self.user_id)\n\n return user_info", "def to_dict(self):\n return {\n \"id\":self.id,\n \"username\":self.email,\n \"email\":self.email,\n \"firstname\":self.firstname,\n \"lastname\":self.lastname\n }", "def get_users(self):\n\n users = {}\n command = \"/user print terse\"\n output = self._send_command(command)\n\n for user in parse_terse_output(output):\n users[user.get('name')] = {\n \"group\": user.get('group')\n }\n\n return users", "def get_persons(self):\n response = self.do_request('/misc/user/export/json')\n if response:\n return response.json()", "def userinfo(self):\n return self._userinfo", "def user_data(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_data\")", "def _get(self, query=None):\n if not query:\n user_data = DB_USER_TABLE.all()\n else:\n user_data = DB_USER_TABLE.search(query)\n\n res = {\n \"total_queried\" : len(user_data),\n \"_embedded\" : {\n \"users\" : self.embed_user_data_in_result(user_data)\n },\n \"_links\" : self.make_links({\n \"self\" : UserList.get_self_url(),\n \"contained_in\" : Root.get_self_url()\n })\n }\n return res", "def get_serialized_user(cls, user):\n return {\n 'email': user.email,\n 'is_superuser': user.is_superuser,\n 'name': user.name,\n 'sodar_uuid': str(user.sodar_uuid),\n 'username': user.username,\n }", "def user_profile_data(id):\n user = User.query.get(id)\n return user.to_dict_profile()", "def getUserInfo(self):\r\n userJson = self.httpGet(ReaderUrl.USER_INFO_URL)\r\n result = json.loads(userJson, strict=False)\r\n self.userId = result['userId']\r\n return result", "def _parse_user_data (self, netflix_page_data):\n user_data = {};\n important_fields = [\n 'authURL',\n 'gpsModel',\n 'guid'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n user_data.update({important_field: netflix_page_data.get(important_field, '')})\n return user_data\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'memberContext' in dict(item).keys():\n for important_field in important_fields:\n user_data.update({important_field: item['memberContext']['data']['userInfo'][important_field]})\n\n return user_data", "def extract_user_dict_from_tweet( tweet: Tweet ):\n if tweet.other_data and len( tweet.other_data ) > 0:\n # extract the json into a dict\n j = json.loads( tweet.other_data )\n # extract the user json from the created dict\n return json.loads( j[ 'user' ] )", "def as_safe_dict(self):\n\t\treturn {\n\t\t\t'group': (self.group.gid if self.group else None),\n\t\t\t'name': self.name,\n\t\t\t'uid': self.uid\n\t\t}", "def 
serialize(self):\n return {\n 'user_id' : self.user_id,\n 'conference_id' : self.conference_id,\n }", "def get_user_completion_data(self):\n activity_completion_data = self.generate_report_data()\n\n if not activity_completion_data:\n return {}\n\n user_activity_completion_data = {\n 'total_activities': activity_completion_data[0].get('total_activities', 0),\n 'course_is_complete': activity_completion_data[0].get('course_is_complete', False),\n 'completed_activities': activity_completion_data[0].get('completed_activities', 0),\n }\n\n user_activity_completion_data.update(get_required_activity_dict(activity_completion_data[0]))\n\n return user_activity_completion_data", "def GetUserData(self, user_id):\n user_data = self.db_manager.GetData(user_id)\n user_data = self._parsetouserDTO(user_data)\n return JsonSerializer.DeserializeJson(user_data)", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id':u.id, 'admin':u.admin})\n return { 'users' : usersJSON }", "async def red_get_data_for_user(self, *, user_id):\n data = \"No data is stored for user with ID {}.\\n\".format(user_id)\n return {\"user_data.txt\": BytesIO(data.encode())}", "async def red_get_data_for_user(self, *, user_id):\n data = \"No data is stored for user with ID {}.\\n\".format(user_id)\n return {\"user_data.txt\": BytesIO(data.encode())}", "def serialize(self):\n return {\n 'user_id' : self.user_id,\n 'session_id' : self.session_id,\n }", "def load_users(self):\n return self.um.read_json(\"users.json\")", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'userID': self.userID,\n }", "def users_view():\n data = get_data()\n return [{'user_id': i, 'name': 'User {0}'.format(str(i))}\n for i in data.keys()]", "def fetch_user(user_id):\n user = user_collection.find_one({\"_id\": user_id})\n user_bookmarks = list()\n for project_id in user[\"bookmarks\"]:\n project = project_collection.find_one({\"_id\": project_id})\n if project is None:\n continue\n bookmark_details = {\n \"PROJECT_ID\": str(project_id),\n \"projectTitle\": project[\"projectTitle\"],\n \"projectDescription\": project[\"projectDescription\"],\n }\n user_bookmarks.append(bookmark_details)\n user_contributions = list()\n for project_id in user[\"contributions\"]:\n project = project_collection.find_one({\"_id\": project_id})\n if project is None:\n continue\n contribution_details = {\n \"projectTitle\": project[\"projectTitle\"],\n \"projectDescription\": project[\"projectDescription\"],\n }\n user_contributions.append(contribution_details)\n user_dict = {\n \"username\": user[\"username\"],\n \"userid\": user[\"userid\"],\n \"email\": user[\"email\"],\n \"avatar\": user[\"avatar\"],\n \"githubURL\": user[\"githubURL\"],\n \"linkedinURL\": user[\"linkedinURL\"],\n \"stackoverflowURL\": user[\"stackoverflowURL\"],\n \"skills\": user[\"skills\"],\n \"bookmarks\": user_bookmarks,\n \"contributions\": user_contributions,\n }\n return user_dict", "def update_users_data(self) -> None:\n users_utts = defaultdict(list)\n users_convos = defaultdict(list)\n\n for utt in self.iter_utterances():\n users_utts[utt.user].append(utt)\n\n for convo in self.iter_conversations():\n for utt in convo.iter_utterances():\n users_convos[utt.user].append(convo)\n\n for user in self.iter_users():\n user.utterances = {utt.id: utt for utt in users_utts[user]}\n user.conversations = {convo.id: convo for convo in users_convos[user]}", "def generate(self) -> dict:\n user_data = {\n \"merge_proposals\": 
self._render_merge_proposals(),\n \"bug_reports\": self._render_reported(),\n \"code_reviews\": {},\n }\n for project in self.projects:\n user_data[\"code_reviews\"][\n project.name\n ] = project.render_project_votes_by_user(self.user)\n\n return user_data", "def users_instance():\n return {\n \"blocked\": False,\n \"created_at\": \"2022-10-21T04:10:34.240Z\",\n \"email\": \"[email protected]\",\n \"email_verified\": False,\n \"family_name\": \"Kerluke\",\n \"given_name\": \"Nick\",\n \"identities\": [\n {\n \"user_id\": \"15164a44-8064-4ef9-ac31-fb08814da3f9\",\n \"connection\": \"Username-Password-Authentication\",\n \"provider\": \"auth0\",\n \"isSocial\": False,\n }\n ],\n \"name\": \"Linda Sporer IV\",\n \"nickname\": \"Marty\",\n \"picture\": \"https://secure.gravatar.com/avatar/15626c5e0c749cb912f9d1ad48dba440?s=480&r=pg&d=https%3A%2F%2Fssl.gstatic.com%2Fs2%2Fprofiles%2Fimages%2Fsilhouette80.png\",\n \"updated_at\": \"2022-10-21T04:10:34.240Z\",\n \"user_id\": \"auth0|15164a44-8064-4ef9-ac31-fb08814da3f9\",\n \"user_metadata\": {},\n \"app_metadata\": {},\n }", "def get_object_data(self, **kwargs):\n user = self.request.user\n return UserProfile.objects.get(user=user)", "def serialize(self):\n return {'id': self.id,\n 'rowId': self.id,\n 'name': self.name,\n 'owner': self.user.username,\n 'isOwner': current_user.id == self.owner_id,\n }", "def to_dict(self):\n user_idt = self.user_idt_format.format(user=self.user_id)\n\n return {'user': self.user_id,\n 'is_admin': self.is_admin,\n 'read_only': self.read_only,\n 'show_deleted': self.show_deleted,\n 'auth_token': self.auth_token,\n 'request_id': self.request_id,\n 'roles': self.roles,\n 'user_identity': user_idt,\n 'user_name': self.user_name}", "def get_json(self):\n url = 'http://lkd.to/api/' + self.user\n response = requests.get(url)\n return response.json()", "def get_user_info_by_name(self, username: str) -> dict:", "def view_user(user):\n return {\n \"id\": user.id,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"email\": user.email,\n \"profile_pic\": user.profile_pic,\n }", "def getUserInfo(UserId):\n url = f\"https://users.roblox.com/v1/users/{UserId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n displayName = j['displayName']\n name = j['name']\n uid = j['id']\n isBanned = j['isBanned']\n joinDate = j['created']\n description = j['description']\n return displayName,name,uid,isBanned,joinDate,description", "def getUserDict(user_file):\n file_handle = open(user_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n user_dict = {}\n counter = 0\n for row in file_reader:\n user_dict[row['USER_ID_hash']] = row\n counter += 1\n assert len(user_dict.keys()) == counter\n\n file_handle.close()\n return user_dict", "def to_dict(self) -> dict:\n return {\n 'author_id': self.id,\n 'fullname': self.fullname\n }", "def getUserInfo(data):\n\tusername = data[\"session_username\"]\n\tuser = Users.objects.filter(username=username).first()\n\n\tresponse = {}\n\n\tif not user:\n\t\treturn {\"Success\": False, \"Error\": \"Unable to retrieve the user information from database\"}\n\n\tresponse[\"Success\"] = True\n\tresponse[\"Username\"] = user.username\n\tresponse[\"Email\"] = user.email\n\tresponse[\"Verified\"] = user.verified\n\tresponse[\"Level\"] = user.level\n\tresponse[\"Experience\"] = user.experience\n\tresponse[\"Coins\"] = user.coins\n\tresponse[\"Preferences\"] = {\"Grid Opacity\": user.pref_grid}\n\n\treturn response", "def _user_status():\n rv = {\n 'messages': map(unicode, 
get_flashed_messages()),\n }\n if current_user.is_anonymous:\n rv.update({\n 'logged_id': False\n })\n else:\n rv.update({\n 'logged_in': True,\n 'name': current_user.display_name\n })\n return rv", "def serialize(self):\n\n try:\n owner = User.query.get(self.owner_id).username\n except Exception:\n owner = None\n\n return {'id': self.id,\n 'rowId': self.id,\n 'name': self.name,\n 'owner': owner,\n 'key': self.key,\n 'group': self.group.name,\n 'organization': self.group.organization.name,\n 'timeAdded': datetime_to_str(self.time_added),\n }", "def who():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def toJSON(self):\n return {\n 'orcid_id': self.orcid_id,\n 'access_token': self.access_token,\n 'created': self.created and self.created.isoformat() or None,\n 'updated': self.updated and self.updated.isoformat() or None,\n 'profile': self.profile and json.loads(self.profile) or None,\n 'info': self.info and json.loads(self.info) or None\n }", "def getInterestedUsers():", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id': u.id, 'admin': u.admin})\n return {'users': usersJSON}", "def _generate_users(self):\n users = {}\n args = self._add_user()\n #Grab info from args\n users[args[\"userID\"]] = {}\n users[args[\"userID\"]][\"name\"] = args[\"name\"]\n users[args[\"userID\"]][\"webhook_url\"] = args[\"webhook_url\"]\n users[args[\"userID\"]][\"blacklist\"] = args[\"blacklist\"]\n #Try to grab override info, default to blank if doesn't exist\n users[args[\"userID\"]][\"override_user\"] = args.get(\"overrideUser\", \"\")\n users[args[\"userID\"]][\"override_userid\"] = args.get(\"overrideUserID\", \"\")\n users[args[\"userID\"]][\"override_oauth\"] = args.get(\"overrideOauth\", \"\")\n fileIO.save_json(\"users.json\", users)" ]
[ "0.67946744", "0.673818", "0.6689629", "0.6652367", "0.66178507", "0.66178507", "0.6612004", "0.6559976", "0.6502731", "0.6489732", "0.6368647", "0.63625497", "0.63397974", "0.6320728", "0.6294594", "0.6281204", "0.62439865", "0.62202954", "0.6208482", "0.6208218", "0.61600155", "0.61533874", "0.61487937", "0.61399746", "0.6135216", "0.6133509", "0.606032", "0.6057293", "0.6027006", "0.6026865", "0.6026385", "0.60262", "0.60262", "0.6023316", "0.6016316", "0.60149825", "0.59553397", "0.595095", "0.59469426", "0.5945283", "0.5932693", "0.590848", "0.590698", "0.58991724", "0.5895343", "0.5889513", "0.58646095", "0.58602417", "0.5860171", "0.58540654", "0.5849214", "0.58449566", "0.58444583", "0.5843768", "0.583522", "0.5831114", "0.58190393", "0.5795732", "0.57943565", "0.578366", "0.57780385", "0.577653", "0.57721436", "0.5768406", "0.57681024", "0.5766762", "0.5760366", "0.5757583", "0.5751944", "0.574957", "0.57385826", "0.57353383", "0.5734462", "0.5734357", "0.5734357", "0.5726697", "0.5717716", "0.57154584", "0.5702654", "0.56936955", "0.5686001", "0.5669787", "0.5662869", "0.56607807", "0.56567645", "0.5654864", "0.5642588", "0.5631334", "0.5624105", "0.56166846", "0.56163967", "0.5607291", "0.56059134", "0.55999875", "0.5580851", "0.5580367", "0.5575109", "0.5573249", "0.5573172", "0.556903" ]
0.6040271
28
Configure TestShib before running the login test
def test_login(self):
    self._configure_testshib_provider()
    self._test_login()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def set_up_login():\n\n bitool.app.testing = True\n bitool.app.config['TESTING'] = True\n bitool.app.login_manager.init_app(bitool.app)\n app = bitool.app.test_client()\n\n return app", "def configure_test(self, test, config_json):\n pass", "def test_activate_login(self):\r\n pass", "def setUp(self):\n self.user = {\n \"Email\": \"[email protected]\",\n \"Password\": \"pass1234\",\n \"Confirm Password\": \"pass1234\"\n }\n self.app = create_app('testing')\n self.client = self.app.test_client", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'", "def setUp(self):\n self.app = app.test_client()\n self.new_user_login = {\n 'username': 'daniel',\n 'password': '[email protected]'\n }\n self.new_user_info = {\n 'username': 'daniel',\n 'fullname': 'daniel jambo',\n 'email': '[email protected]',\n 'password': '[email protected]'\n }", "def setUpConfig(self):\n pass", "def setUp(self):\n\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'\n self.client = app.test_client()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = 1", "def setUpClass(cls):\n app.config['TESTING'] = True\n app.config['CSRF_ENABLED'] = False\n app.config['WTF_CSRF_ENABLED'] = False\n app.config['SQLALCHEMY_DATABASE_URI'] = \\\n 'sqlite:///' + TEST_DATABASE_PATH\n\n # Disable login_required for tests\n # Use self.enable_login() context manager to enable for a test\n app.login_manager._login_disabled = True\n\n # Disable session protection, since `follow_redirects=True` doesn't\n # seem to maintain request metadata (e.g. when using 'REMOTE_ADDR')\n # (Is there a better way?)\n app.login_manager.session_protection = None", "def setup_class(self):\n self.endpoint = VERSION_PREFIX + '/auth/login'\n self.test_client = create_app().test_client()", "def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs", "def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n self.client = app.test_client()", "def setUp(self):\n\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"ABC\"\n self.client = app.test_client()\n\n # Connect to test database\n connect_to_db(app)\n db.drop_all()\n db.create_all()\n load_test()\n\n # Put user1 into session.\n with self.client as c:\n with c.session_transaction() as sess:\n sess[\"current_user\"] = 1", "def test_set_session():", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self): \n self.client = app.test_client()\n self.acceso = login(self.client)\n identity_loaded.connect(_on_principal_init)", "def setUp(self):\n #app['TESTING'] = True\n self.test_app = app.test_client()", "def setUp(self):\n super(LoginTest, self).setUp()\n self.login_url = \"http://localhost:5000/login\"\n self.logout_url = \"http://localhost:5000/logout\"\n self.valid_health_card_nb = \"XGCB 1090 0810\"\n self.password = \"password\"\n 
self.send_post(self.logout_url)\n cache.reset_cache()", "def setUp(self):\n super(TestControlsImport, self).setUp()\n self.client.get(\"/login\")", "def setUpClass(cls, user=''):\n super().setUpClass(first_admin)", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.app_context = self.app.app_context()\n self.app_context.push()\n self.client = self.app.test_client()\n\n\n self.user = {\n\t \"firstname\": \"Michael\",\n\t \"lastname\": \"Mbugua\",\n \"othername\": \"Mike\",\n \"email\": \"[email protected]\",\n \"phoneNumber\": \"0708453901\",\n \"username\": \"Thomas\",\n \"password\": \"Aw3someSauce\"\n \n }", "def setUp(self):\n\n app.config['TESTING'] = True\n self.client = app.test_client()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = 1", "def setUp(self):\n\n from . import main\n\n from .models import (\n get_engine,\n get_session_factory,\n get_tm_session,\n )\n\n self.config={\n 'admin_password':self.admin_login['password'],\n 'sqlalchemy.url':'sqlite://',\n 'auth.secret':'secret'\n }\n\n self.app = main({}, **self.config)\n self.init_database()\n self.testapp=webtest.TestApp(self.app)", "def test_login_required():\n pass", "def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n app.config['SECRET_KEY'] = 'testingKey'\r\n self.client = app.test_client()\r\n\r\n with self.client as c:\r\n with c.session_transaction() as sess:\r\n sess['email'] = \"[email protected]\"", "def setup_module():\n pytest.test_user = fake_user.FakeUser()", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n # initialize the test client\n self.client = self.app.test_client\n # This is the user test json data with a predefined username, email and password\n \n self.user_data = {\n 'user_email': '[email protected]',\n 'password': 'testexample'\n }\n self.user_data_2 = {\n 'user_email': '[email protected]',\n 'password': 'test_123'\n }", "def startTestHook(self):", "def setUp(self):\n self.user = {\n INPUT: \"12345\",\n }", "def setUp(self):\n self.__user = '[email protected]'\n self.__secret = 'BolshoyAdmin123'\n self.__channel = None\n self.__region = 'zrh'\n return", "def setUp(self):\r\n self.app = app.test_client()\r\n self.app.testing = True", "def setUp(self):\n self.data = {'username': 'seiph',\n 'first_name': 'Jean',\n 'last_name': 'Robert',\n 'email': '[email protected]',\n 'password1': 'kevin1234',\n 'password2': 'kevin1234'}", "def setUp(self):\n self.login(self.create_superuser())", "def setUp(self):\n # Get the Flask test client\n self.client = app.test_client()\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\r\n\r\n # Get the Flask test client\r\n self.client = app.test_client()\r\n\r\n # Show Flask errors that happen during tests\r\n app.config['TESTING'] = True", "def setUp(self):\n config = SelectLsstImagesTask.ConfigClass()\n try:\n DbAuth.username(config.host, str(config.port)),\n except RuntimeError as e:\n reason = \"Warning: did not find host=%s, port=%s in your db-auth file; or %s \" \\\n \"skipping unit tests\" % \\\n (config.host, str(config.port), e)\n raise unittest.SkipTest(reason)", "def setUp(self):\n pyauto.PyUITest.setUp(self)\n\n webapp = self.InstallExtension(self.GetWebappPath())\n self.host.LaunchApp(webapp)\n self.account = self.GetPrivateInfo()['test_chromoting_account']", "def setUp(self):\n\n # Get the Flask test client. 
Client is the browser.\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n self.app = app.test_client()\n self.app.testing = True", "def setUp(self):\r\n super(SysadminBaseTestCase, self).setUp()\r\n self.user = UserFactory.create(username='test_user',\r\n email='[email protected]',\r\n password='foo')\r\n self.client = Client()", "def login(test_app):\n from flask_monitoringdashboard import config\n\n with test_app.session_transaction() as sess:\n sess[config.link + '_logged_in'] = True\n sess[config.link + '_admin'] = True", "def setUp(self):\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n # initialize the test client\n self.client = self.app.test_client\n # This is the user test json data with a predefined email and password\n self.user_data = {\n 'user_email': '[email protected]',\n 'password': 'test123'\n }\n self.user_data_2 = {\n \"user_email\": \"[email protected]\",\n \"password\": \"testexample\"\n }\n self.user_data_3 = {\n \"user_email\": \"example.com\",\n \"password\": \"testexample\"\n }\n\n self.user_data_4 = {\n \"user_email\": \"[email protected]\",\n \"password\": \"tes\"\n }", "def test_config():\n\n # assert create_app().testing\n assert create_app(\"testing\", settings={\n \"TESTING\": True,\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False\n }).testing", "def setUpClass(cls):\n cls._config = imageroller.test.write_config(\n \"config\", CONFIG_SERVER_VALID_MINIMAL, CONFIG_DATA)\n cls._auth = imageroller.test.write_config(\n \"auth\", AUTH_VALID, AUTH_DATA)", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.activity = {'name': 'Shop in Dubai'}\n # test bucket\n self.bucketlist = {'name': 'Go to Egypt for trip'}\n # test user\n self.user_details = {\n 'email': '[email protected]',\n 'password': 'password123'\n }\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.session.close()\n db.drop_all()\n db.create_all()", "def __init__(self):\r\n self.load_config()\r\n self.login()", "def test_config():\n assert not sample.create_app().testing\n assert sample.create_app({\"TESTING\": True}).testing", "def setUp(self):\n self.portal = self.layer['portal']\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n login(self.portal, TEST_USER_NAME)\n self.portal.invokeFactory('Table', 'table')", "def setUp(self):\n # Reset the database and start a test client. 
Disable error catching\n # during request handling so that you get better error reports when\n # performing test requests against the application.\n try:\n insta485db = sh.Command(\"./bin/insta485db\")\n insta485db(\"reset\")\n except sh.ErrorReturnCode as error:\n self.assertTrue(False, (\"Failed to run insta485db, \"\n \"output: \"\n \"{}\").format((error).decode('ascii')))\n insta485.app.config[\"TESTING\"] = True\n self.app = insta485.app.test_client()", "def setUp(self):\n self.settings = getattr(settings, \"COOKIELESS\", DEFAULT_SETTINGS)\n self.settings[\"HOSTS\"] = []\n self.browser = Client()\n self.browser.request()\n self.engine = import_module(settings.SESSION_ENGINE)\n self.crypt_sesh = CryptSession()\n self.factory = RequestFactory()\n self.skey = settings.SESSION_COOKIE_NAME\n # This is a bit crap - because matching is fragile and also its\n # reused to split up and grab the session id - TODO: replace with regex\n self.hidden = '<input type=\"hidden\" name=\"%s\" value=\"' % self.skey", "def setUp(self):\n self.new_user = User(username='burens', password='12345')", "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n self.login(email=common.admin_email, username=common.admin_username)\n self.galaxy_login(email=common.admin_email, username=common.admin_username)", "def setUp(self):\n self.credentials = {\n \"username\": \"BobRobert\",\n \"first_name\": \"Bob\",\n \"last_name\": \"Robert\",\n \"email\": \"[email protected]\",\n \"password\": \"fglZfYmr%?,\",\n }", "def setUp(self):\r\n super(TestCourseListing, self).setUp()\r\n # create and log in a staff user.\r\n self.user = UserFactory(is_staff=True) # pylint: disable=no-member\r\n self.factory = RequestFactory()\r\n self.client = AjaxEnabledTestClient()\r\n self.client.login(username=self.user.username, password='test')", "def setUp(self):\n User.users = {}\n self.app = User('[email protected]', 'admin', 'admin')\n # Set some default user data\n self.user_data = {\n 1: {\n 'email': '[email protected]',\n 'username': 'admin',\n 'password': 'admin' \n }\n \n }", "def setUp(self):\n self.new_credentials = Credentials(\"Facebook\",\"Josphato\",\"jose!!otieno@45\")", "def setUp(self):\n self.shell = DummyShell()\n self.executor = executor.Executor(self.shell)", "def setUp(self) -> None:\n self.app = app.app.test_client()\n self.app.testing = True", "def setUpClass(cls):\n super(test_usage_retention, cls).setUpClass()\n cls.mgmt_client = cls.dbaas_provider.mgmt_client.reddwarfclient\n cls.mgmt_client.authenticate()", "def setUpClass(cls):\n cls._no_section = imageroller.test.write_config(\n \"auth\", AUTH_NO_SECTION, AUTH_DATA)\n cls._no_user = imageroller.test.write_config(\n \"auth\", AUTH_NO_USER, AUTH_DATA)\n cls._blank_user = imageroller.test.write_config(\n \"auth\", AUTH_BLANK_USER, AUTH_DATA)\n cls._no_key = imageroller.test.write_config(\n \"auth\", AUTH_NO_KEY, AUTH_DATA)\n cls._blank_key = imageroller.test.write_config(\n \"auth\", AUTH_BLANK_KEY, AUTH_DATA)\n cls._valid = imageroller.test.write_config(\n \"auth\", AUTH_VALID, AUTH_DATA)", "def setUpClass(cls):\n cls.ins = User()", "def _set_up():\n repl._setUp = self.setUp", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n 
kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def setUp(self):\n super(TestAssessmentImport, self).setUp()\n self.client.get(\"/login\")", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n\n self.user = UserFactory()\n self.ai = factory.build(dict, FACTORY_CLASS=AiFactory)\n self.ai_details = factory.build(dict, FACTORY_CLASS=AIDetails)\n Profile.objects.create(user=self.user)\n\n self.client.force_login(self.user)\n session = self.client.session\n session['token'] = 'token'\n session.save()", "def setUp(self):\n\n self.user = UserFactory()\n self.ai = factory.build(dict, FACTORY_CLASS=AiFactory)\n self.ai_details = factory.build(dict, FACTORY_CLASS=AIDetails)\n Profile.objects.create(user=self.user)\n\n self.client.force_login(self.user)\n session = self.client.session\n session['token'] = 'token'\n session.save()", "def setUp(self):\n\n self.user = UserFactory()\n self.ai = factory.build(dict, FACTORY_CLASS=AiFactory)\n self.ai_details = factory.build(dict, FACTORY_CLASS=AIDetails)\n Profile.objects.create(user=self.user)\n\n self.client.force_login(self.user)\n session = self.client.session\n session['token'] = 'token'\n session.save()", "def setUp(self):\n\n self.user = UserFactory()\n self.ai = factory.build(dict, FACTORY_CLASS=AiFactory)\n self.ai_details = factory.build(dict, FACTORY_CLASS=AIDetails)\n Profile.objects.create(user=self.user)\n\n self.client.force_login(self.user)\n session = self.client.session\n session['token'] = 'token'\n session.save()", "def setUp(self):\n\n self.user = UserFactory()\n self.ai = factory.build(dict, FACTORY_CLASS=AiFactory)\n self.ai_details = factory.build(dict, FACTORY_CLASS=AIDetails)\n Profile.objects.create(user=self.user)\n\n self.client.force_login(self.user)\n session = self.client.session\n session['token'] = 'token'\n session.save()", "def setUp(self):\n\n self.user = UserFactory()\n self.ai = factory.build(dict, FACTORY_CLASS=AiFactory)\n self.ai_details = factory.build(dict, FACTORY_CLASS=AIDetails)\n Profile.objects.create(user=self.user)\n\n self.client.force_login(self.user)\n session = self.client.session\n session['token'] = 'token'\n session.save()", "def setUp(self):\n self.hello_url = \"http://localhost:7000\"\n self.store_url = self.hello_url 
+ \"/store\"\n self.session = requests.session()", "def setUp(self):\n self.app = create_app(TestingConfig)\n self.client = self.app.test_client\n self.user = {\n \"email\": \"[email protected]\",\n \"firstname\": \"Yeku Wilfred\",\n \"lastname\": \"chetat\",\n \"phone\": \"671357962\",\n \"password\": \"weezybaby\"\n }\n\n with self.app.app_context():\n # create all tables\n db.create_all()\n initialize_db()", "def test_start_new_verification(self):\r\n user = UserFactory.create(username=\"rusty\", password=\"test\")\r\n self.client.login(username=\"rusty\", password=\"test\")", "def test_successful_login(self):\n pass", "def setUp(self):\n self.app = app.test_client()", "def setUp(self):\n self.app = app.test_client()", "def setUpClass(cls):\n cls._token = cls._api.login(username=USER, password=PASS)", "def setUp(self):\n\n self.client = server.app.test_client()\n server.app.config['TESTING'] = True\n server.app.config['SECRET_KEY'] = \"123\"\n\n # Connect to test database\n model.connect_to_db(server.app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n model.db.create_all()\n # example_data()\n\n with self.client as c:\n with c.session_transaction() as session:\n session['user_id'] = 33\n session['username'] = 'balloonicorn'\n session['name'] = 'balloonicorn'", "def setUp(self):\n MainTests.setUp(self)", "def setUpClass(cls):\n cls.user_info = dict(email=\"[email protected]\", password=\"thetrueeredar\", firstname=\"Archimonde\",\n familyname=\"the defiler\", gender=\"male\", city=\"Mac'Aree\", country=\"Argus\")", "def setUp(self):\n self.app = Flask(__name__)\n self.app_context = self.app.app_context()\n self.app_context.push()\n self.client = self.app.test_client()", "def setUp(self):\n\n # Get the Flask test client.\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"key\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[\"dietitian_id\"] = 2", "def test_m19bqj_236(setup):\n tiger_obj = Tiger(browser_name=browser_name)\n tiger_obj.login(username=username, password=password)", "def setUp(self):\n test_env_setup()", "def test_auth0_config_admin(testapp, registry):\n _test_auth_config(testapp, registry)", "def setup_method(self, test_method):\n self.wo_obj = TestCases()\n self.global_config, self.test_args = self.wo_obj.get_config_data(test_method=test_method.__name__)", "def test_config():\n assert not create_app().testing\n assert create_app(TestConfig).testing", "def setUpClass(cls):\n super(IronicTest, cls).setUpClass()\n if cls.manager.clients_initialized:\n cls.usr = cls.config.compute.controller_node_ssh_user\n cls.pwd = cls.config.compute.controller_node_ssh_password\n cls.key = cls.config.compute.path_to_private_key\n cls.timeout = cls.config.compute.ssh_timeout\n if not cls.ironic_client:\n LOG.warning('Ironic client was not initialized')", "def setUpAuth(self):\n self.user, self.user_headers = self.authUser()\n self.admin, self.admin_headers = self.authAdmin()" ]
[ "0.68922627", "0.6773958", "0.6753465", "0.6616434", "0.65026134", "0.6428037", "0.6423407", "0.6388931", "0.63668215", "0.6346543", "0.63392085", "0.6336799", "0.62982786", "0.62645006", "0.62591666", "0.62502235", "0.62502235", "0.62462556", "0.62462556", "0.62462556", "0.62439245", "0.6243138", "0.6242279", "0.62198436", "0.62160385", "0.6207759", "0.620737", "0.61967075", "0.6193627", "0.6192629", "0.6178201", "0.61690265", "0.61298364", "0.6109236", "0.6105475", "0.60819715", "0.6078138", "0.60747445", "0.60706854", "0.60609996", "0.6041724", "0.6024731", "0.6015398", "0.6014138", "0.6012665", "0.6008123", "0.6006577", "0.6002008", "0.60009974", "0.60009974", "0.59973055", "0.59869003", "0.5982677", "0.59764695", "0.5971104", "0.5968228", "0.5967086", "0.59661263", "0.5954581", "0.5944629", "0.5922501", "0.5918829", "0.59110576", "0.58968043", "0.588891", "0.5888597", "0.5888358", "0.5878361", "0.58750486", "0.5874658", "0.58698046", "0.5867704", "0.58551973", "0.5852011", "0.5852011", "0.58499795", "0.58499795", "0.58499795", "0.58499795", "0.58499795", "0.58499795", "0.5836792", "0.5835316", "0.583065", "0.58263415", "0.58235687", "0.58235687", "0.58058435", "0.580321", "0.57959175", "0.5794423", "0.5794244", "0.57888377", "0.57863486", "0.57820237", "0.5771397", "0.5769147", "0.57685566", "0.57657796", "0.5763915" ]
0.81601787
0
Configure TestShib before running the register test
def test_register(self):
    self._configure_testshib_provider()
    self._test_register()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_test(self, test, config_json):\n pass", "def test_register():\n plug.manager.register(junit4)", "def setUpConfig(self):\n pass", "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def setup_method(self, test_method):\n self.wo_obj = TestCases()\n self.global_config, self.test_args = self.wo_obj.get_config_data(test_method=test_method.__name__)", "def startTestHook(self):", "def setUp(self):\n self.modules = {}", "def runTest(self):\n self.setUp()\n self.test_ExtendSpine1()", "def test_scrapping(self):\n self.assertEqual(ScrappingConfig.name, \"scrapping\")", "def config_setup(self, config):\n super(PushGatewayApiV1TestCase, self).config_setup(config)\n config[\"apps\"][\"com.example.spqr\"] = {\n \"type\": \"tests.test_pushgateway_api_v1.TestPushkin\"\n }", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):", "def testInit(self):\n self.globalInit()\n self.test.start()", "def test_install(self):\n pass", "def setUp(self):\n event_bus._event_bus = event_bus._EventBus()", "def _set_up():\n repl._setUp = self.setUp", "def setUpModule():\n base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('romanesco')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer(False)", "def setUp(self):\n #app['TESTING'] = True\n self.test_app = app.test_client()", "def setUp_extra(self):\n pass", "def setUp(self):\n self.supvisors = DummySupvisors()", "def setUpClass(cls):\n super().setUpClass(application_name='ovn-chassis')", "def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n self.client = app.test_client()", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def configure(self) -> None:", "def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def test_setup(self):\n\n transformer = SnmpTransformer()\n transformer.setup(\"test\",{\n \"mib_dir\" : \"/tmp/\"\n })", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n self", "def setUp(self):\n self", "def configure(self):\r\n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n self.shell = DummyShell()\n self.executor = executor.Executor(self.shell)", "def _configure(self):\n test_lib.test_config.setdefault('config_files', []).append(\n self.filename)\n self._write_config_content()", "def setUp(self) :\n pass", "def setUp(self):\r\n pass", "def setUp(self):\n _, 
instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUpClass(self):\n super(TestExpedition, self).setUpClass()", "def startTestRun(self):", "def configure(self):\n pass", "def configure(self):\n pass", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'", "def setUp(self):\n # Let's install a bundle to use in tests\n self.run_function(\"assistive.install\", [OSA_SCRIPT, True])", "def setUpModule():\n # pull in test environment as dict\n global TestEnv\n get_test_env(TestEnv)", "def setUpModule():\n # pull in test environment as dict\n global TestEnv\n get_test_env(TestEnv)", "def setUp(self):\n self.hass = get_test_home_assistant()", "def setUp(self):\n self.hass = get_test_home_assistant()", "def setUp(self):\n test_env_setup()", "def test_module_initialisation_method_call_only(app_config):\n bootstrap._initialize_modules(app_config)", "def test_setup_module(self):\n pluggable_package.setup(self._test_module)\n self._test_setup(self._test_module)", "def setup( self ):", "def setUp(self):\n raise NotImplementedError", "def setUpModule(): # noqa\n base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('romanesco')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer()", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def setUp(self):\r\n pass # nothing used by all\r", "def setUp(self):\n MainTests.setUp(self)", "def setUpClass(self):", "def setUpModule():\n global _tmpdir, tmpdir\n _tmpdir = tempfile.TemporaryDirectory(prefix='file2wsb-', dir=TEMP_DIR)\n tmpdir = os.path.realpath(_tmpdir.name)\n\n # mock out user config\n global mockings\n mockings = [\n mock.patch('webscrapbook.scrapbook.host.WSB_USER_DIR', os.path.join(tmpdir, 'wsb')),\n mock.patch('webscrapbook.WSB_USER_DIR', os.path.join(tmpdir, 'wsb')),\n mock.patch('webscrapbook.WSB_USER_CONFIG', tmpdir),\n ]\n for mocking in mockings:\n mocking.start()", "def setUp(self):\n\n pass" ]
[ "0.7068733", "0.6525172", "0.6467879", "0.63710594", "0.6313307", "0.6234248", "0.6212751", "0.60972005", "0.6043006", "0.6039801", "0.5995316", "0.5995316", "0.5995316", "0.5995316", "0.5992337", "0.5989009", "0.59598655", "0.5953964", "0.5928903", "0.59063", "0.5904783", "0.5883329", "0.5874261", "0.5873353", "0.5849461", "0.5849461", "0.5849461", "0.5846878", "0.5846878", "0.5832195", "0.5820069", "0.5816768", "0.5810724", "0.5810724", "0.5802884", "0.5802884", "0.57990384", "0.5789393", "0.5789393", "0.5789393", "0.5789393", "0.5789393", "0.5789393", "0.5789393", "0.5789393", "0.5789393", "0.5786271", "0.5780211", "0.5777298", "0.5777089", "0.57752043", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.5774701", "0.577335", "0.57696474", "0.57694656", "0.57694656", "0.57691747", "0.57681453", "0.5761062", "0.5761062", "0.5759554", "0.5759554", "0.5758762", "0.5749171", "0.57485694", "0.57451504", "0.57429725", "0.573832", "0.57379246", "0.57338035", "0.57320774", "0.5731168", "0.5726997", "0.57193464" ]
0.83610606
0
Test that attributes sent by a SAML provider are stored in the UserSocialAuth table.
def test_login_records_attributes(self): self.test_login() record = UserSocialAuth.objects.get( user=self.user, provider=self.PROVIDER_BACKEND, uid__startswith=self.PROVIDER_IDP_SLUG ) attributes = record.extra_data assert attributes.get('urn:oid:1.3.6.1.4.1.5923.1.1.1.9') == ['[email protected]', '[email protected]'] assert attributes.get('urn:oid:2.5.4.3') == ['Me Myself And I'] assert attributes.get('urn:oid:0.9.2342.19200300.100.1.1') == ['myself'] assert attributes.get('urn:oid:2.5.4.20') == ['555-5555'] # Phone number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def test_attribute_authenticated_has_attributes(testapp, login_fixture, fill_the_db):\n response = testapp.get('/attribute/1/1', params=login_fixture)\n assert len(response.html.find_all(\"img\")) == 2", "def test_attributes(self):\n user = User()\n self.assertTrue(hasattr(user, \"email\"))\n self.assertTrue(hasattr(user, \"password\"))\n self.assertTrue(hasattr(user, \"first_name\"))\n self.assertTrue(hasattr(user, \"last_name\"))", "def test_attributes(self):\n self.assertTrue(hasattr(User()), \"email\")\n self.assertTrue(hasattr(User()), \"password\")\n self.assertTrue(hasattr(User()), \"first_name\")\n self.assertTrue(hasattr(User()), \"last_name\")\n self.assertTrue(hasattr(User()), \"__init__\")", "def _test_assessment_users(self, asmt, users):\n verification_errors = \"\"\n for user_name, expected_types in users.items():\n try:\n user = models.Person.query.filter_by(name=user_name).first()\n rel = models.Relationship.find_related(asmt, user)\n if expected_types:\n self.assertNotEqual(\n rel, None,\n \"User {} is not mapped to {}\".format(user.email, asmt.slug))\n self.assertIn(\"AssigneeType\", rel.relationship_attrs)\n self.assertEqual(\n set(rel.relationship_attrs[\n \"AssigneeType\"].attr_value.split(\",\")),\n expected_types\n )\n else:\n self.assertEqual(\n rel, None,\n \"User {} is mapped to {}\".format(user.email, asmt.slug))\n except AssertionError as error:\n verification_errors += \"\\n\\nChecks for Users-Assessment mapping \"\\\n \"failed for user '{}' with:\\n{}\".format(user_name, str(error))\n\n self.assertEqual(verification_errors, \"\", verification_errors)", "def test_attributes(self):\n u = User.query.filter_by(username=\"jjones\").first()\n assert u.username == \"jjones\"\n assert u.email == \"[email protected]\"\n assert len(u.reviews) == 4\n assert u.email_verified is False\n assert u._email_token_key == 'verify_email'\n assert u._password_token_key == 'reset_password'\n assert u.sentfriendrequests == []\n assert u.receivedfriendrequests == []\n assert u.sentgrouprequests == []\n u2 = User.query.get(1)\n assert u2 in u.friends\n assert type(u.address) == Address", "def test_activity_attr(self):\n user = User()\n user_details = {\"student_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertTrue(hasattr(student, \"activity\"))\n if models.storage_t == 'db':\n self.assertEqual(student.activity, None)\n else:\n self.assertEqual(student.activity, \"\")", "def assert_social_auth_exists_for_user(self, user, strategy):\r\n social_auths = strategy.storage.user.get_social_auth_for_user(\r\n user, provider=self.PROVIDER_CLASS.BACKEND_CLASS.name)\r\n self.assertEqual(1, len(social_auths))\r\n self.assertEqual(self.backend_name, social_auths[0].provider)", "def test_first_name_attr(self):\n user = User()\n user_details = {\"student_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertTrue(hasattr(student, \"first_name\"))\n if models.storage_t == 'db':\n self.assertEqual(student.first_name, \"Joe\")\n else:\n 
self.assertEqual(student.first_name, \"Joe\")", "def test_attribute_view_authenticated(testapp, fill_the_db, login_fixture):\n response = testapp.get('/attribute/1/1', params=login_fixture)\n assert response.status_code == 200", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_claims_supported_set(self):\n expected_claims = ['openid', 'email']\n\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n 
self.assertEqual(dic['claims_supported'], expected_claims)", "def test_add_authenticated_session_var(self):\r\n req = Mock(authname='john', base_path='/', incookie=Cookie())\r\n session = Session(self.env, req)\r\n session['foo'] = 'bar'\r\n session.save()\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT value FROM session_attribute WHERE sid='john'\"\r\n \"AND name='foo'\") \r\n self.assertEqual('bar', cursor.fetchone()[0])", "def test_read_user_identity_mapping(self):\n pass", "def testMetadata(self):\n self.assertGreater(len(self.unauth.metadata(self.dataset)), 0)\n self.assertGreater(len(self.auth.metadata(self.dataset)), 0)", "def test_all_user_active(self):\n procedure = Procedure.objects.first()\n students = Student.objects.filter(promotion=procedure.promotion)\n for student in students:\n self.assertEqual(student.user.is_active, True)", "def test_user_attrs(self):\n # These are 'functional' level tests for common use cases. Direct\n # testing of the implementation (SimpleLazyObject) is in the 'utils'\n # tests.\n self.client.login(username=\"super\", password=\"secret\")\n user = authenticate(username=\"super\", password=\"secret\")\n response = self.client.get(\"/auth_processor_user/\")\n self.assertContains(response, \"unicode: super\")\n self.assertContains(response, \"id: %d\" % self.superuser.pk)\n self.assertContains(response, \"username: super\")\n # bug #12037 is tested by the {% url %} in the template:\n self.assertContains(response, \"url: /userpage/super/\")\n\n # A Q() comparing a user and with another Q() (in an AND or OR fashion).\n Q(user=response.context[\"user\"]) & Q(someflag=True)\n\n # Tests for user equality. This is hard because User defines\n # equality in a non-duck-typing way\n # See bug #12060\n self.assertEqual(response.context[\"user\"], user)\n self.assertEqual(user, response.context[\"user\"])", "def test_get_attributes(test_common_dao):\n _session = test_common_dao.RAMSTK_SESSION(\n bind=test_common_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKSiteInfo).first()\n\n assert DUT.get_attributes() == ATTRIBUTES", "def test_dataset_for_personal_accounts(self):\n pass", "def test_user_login(self):\n\n for i in range(0, len(self.users)):\n\n # Gets user\n user = self.users[i]\n\n # Creates payload\n event = {\n \"username\": user['username'],\n \"pwd\": user['pwd']\n }\n\n # Invokes\n response = handler.user_login(event=event, context=None)\n\n # Validates response\n body_dict = json.loads(response['body'])\n apidataset_dict = body_dict['apidataset']\n self.assertEqual(response['statusCode'], 200)\n self.assertEqual (\n apidataset_dict['displayName'],\n user['nameFirst'] + ' ' + user['nameLast']\n )\n self.assertIn('sessionToken', apidataset_dict)", "def test_create_user_identity_mapping(self):\n pass", "def test_attribute_types(self):\n self.assertIsInstance(self.user_1.email, str)\n self.assertIsInstance(self.user_1.password, str)\n self.assertIsInstance(self.user_1.first_name, str)\n self.assertIsInstance(self.user_1.last_name, str)", "def test_attributeCopied(self):\n self.assertIdentical(\n self.store.findUnique(AMPConfiguration).loginSystem,\n self.store.findUnique(LoginSystem))", "def test_profile_associated_with_users(self):\n profile = ImagerProfile.objects.first()\n self.assertTrue(hasattr(profile, 'user'))\n self.assertIsInstance(profile.user, User)", "def test_is_student_user(self):\n student = User.objects.get(email='[email protected]')\n self.assertEqual(student.is_staff, False)", "def acs(r):\n 
saml_client = _get_saml_client(get_current_domain(r))\n resp = r.POST.get('SAMLResponse', None)\n next_url = r.session.get('login_next_url')\n\n authn_response = saml_client.parse_authn_request_response(\n resp, entity.BINDING_HTTP_POST)\n if authn_response is None:\n return HttpResponse(\"Error at line 115\")\n\n user_identity = authn_response.get_identity()\n if user_identity is None:\n return HttpResponse(\"Error at line 118\")\n\n\n user_email = user_identity[\n settings.SAML2_AUTH\n .get('ATTRIBUTES_MAP', {})\n .get('email', 'Email')\n ][0]\n user_name = user_identity[\n settings.SAML2_AUTH\n .get('ATTRIBUTES_MAP', {})\n .get('username', 'UserName')\n ][0]\n user_first_name = user_identity[\n settings.SAML2_AUTH\n .get('ATTRIBUTES_MAP', {})\n .get('first_name', 'FirstName')\n ][0]\n user_last_name = user_identity[\n settings.SAML2_AUTH\n .get('ATTRIBUTES_MAP', {})\n .get('last_name', 'LastName')\n ][0]\n\n target_user = None\n is_new_user = False\n\n try:\n target_user = User.objects.get(username=user_name)\n if settings.SAML2_AUTH.get('TRIGGER', {}).get('BEFORE_LOGIN', None):\n import_string(\n settings.SAML2_AUTH['TRIGGER']['BEFORE_LOGIN']\n )(user_identity)\n except User.DoesNotExist:\n target_user = _create_new_user(\n user_name, user_email,\n user_first_name, user_last_name\n )\n if settings.SAML2_AUTH.get('TRIGGER', {}).get('CREATE_USER', None):\n import_string(\n settings.SAML2_AUTH['TRIGGER']['CREATE_USER']\n )(user_identity)\n is_new_user = True\n\n r.session.flush()\n\n if target_user.is_active:\n target_user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(r, target_user)\n else:\n return HttpResponse(\"Error at line 169\")\n\n if is_new_user:\n try:\n return render(\n r, 'django_saml2_auth/welcome.html',\n {'user': r.user}\n )\n except TemplateDoesNotExist:\n return HttpResponseRedirect(next_url)\n else:\n return HttpResponseRedirect(next_url)", "def test_user_information_request(self):\n pass", "def test_creation_profile_2():\n assert tuple_NT[0][1] == LIST_dict[0]['sex'], \"sex of profile is not getting stored properly\"", "def test_modify_authenticated_session_var(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('john', 1, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('john', 1, 'foo', 'bar')\")\r\n\r\n req = Mock(authname='john', base_path='/', incookie=Cookie())\r\n session = Session(self.env, req)\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute \"\r\n \"WHERE sid='john' AND name='foo'\") \r\n self.assertEqual('baz', cursor.fetchone()[0])", "def test_response_attributes(self):\n # Same id, data and user_id\n id = data = user_id = get_rand_string()\n self.conn.add(id=id, user_id=user_id, data=data)\n self.conn.commit()\n\n response = self.conn.query(q=\"id:\" + id)\n expected_attrs = [\"numFound\", \"start\", \"maxScore\", \"header\"]\n\n for attr in expected_attrs:\n self.assertTrue(attr in dir(response),\n \"Attribute %s not found in response. id:%s\" % (attr, id))\n\n value = getattr(response, attr)\n self.assertTrue(bool(value),\n \"Attribute %s has no value. 
id:%s\" % (attr,id))", "def test_display_authenticated_has_string(testapp, fill_the_db, login_fixture):\n response = testapp.get(\"/display/1/1/1\", params=login_fixture)\n display_h1 = response.html.find_all('h1')[1]\n assert \"user name, category attribute\" in display_h1", "def test_professor_can_login_to_web_portal(professor):", "def test_identity(self):\n me = self.d.identity()\n self.assertEqual(me.data['consumer_name'], 'Test Client')\n self.assertEqual(me, self.d.user('example'))", "def assert_social_auth_does_not_exist_for_user(self, user, strategy):\r\n social_auths = strategy.storage.user.get_social_auth_for_user(\r\n user, provider=self.PROVIDER_CLASS.BACKEND_CLASS.name)\r\n self.assertEqual(0, len(social_auths))", "def test_user_set_profile():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/profile')\n with client.session_transaction() as sess:\n data = {\n 'name': 'user',\n 'email': '[email protected]',\n 'confirm': '',\n 'password': '',\n 'affiliation': 'affiliation_test',\n 'website': 'https://ctfd.io',\n 'country': 'United States of America',\n 'nonce': sess.get('nonce')\n }\n\n r = client.post('/profile', data=data)\n assert r.status_code == 302\n\n user = Teams.query.filter_by(id=2).first()\n assert user.affiliation == 'affiliation_test'\n assert user.website == 'https://ctfd.io'\n assert user.country == 'United States of America'\n destroy_ctfd(app)", "def test_set_attributes(test_common_dao):\n _session = test_common_dao.RAMSTK_SESSION(\n bind=test_common_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKSiteInfo).first()\n\n _error_code, _msg = DUT.set_attributes(ATTRIBUTES)\n\n assert _error_code == 0\n assert _msg == (\"RAMSTK SUCCESS: Updating RAMSTKSiteInfo attributes.\")", "def test_components_profile(self):\r\n\t\tprofile = Profile.objects.get(bio=\"I'm a female profile with inserted components\")\r\n\t\tself.assertEqual(self.u1.profile, profile)", "def test_setting_csv_auth(self):\n path = reverse(\"setting-csv\")\n request = RequestFactory().get(path)\n request.user = mixer.blend(User)\n response = csv_setting(request)\n assert response.status_code == 200", "def test_retrieve(self):\n users = CalendallUser.objects.all()\n self.check_attrs_helper(users)", "def check_user_data_in_response(response_data):\n assert response_data[\"id\"] > 0\n assert response_data[\"name\"] == pytest.test_user.name\n assert response_data[\"email\"] == pytest.test_user.email\n assert response_data[\"gender\"] == pytest.test_user.gender\n assert response_data[\"status\"] == pytest.test_user.status", "def is_tacoma_student():\n return _is_member('uw_affiliation_tacoma-student')", "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_sso_user() -> dict:\n create_user_response = 
SSO_API_CLIENT.post(\n \"/testapi/test-users/\", data={}, authenticator=BASIC_AUTHENTICATOR\n )\n assert create_user_response.status_code == HTTP_200_OK\n return create_user_response.json()", "def test_ssl_login_without_signup_lms(self):\r\n\r\n external_auth.views.ssl_login(self._create_ssl_request('/'))\r\n\r\n # Assert our user exists in both eamap and Users, and that we are logged in\r\n try:\r\n ExternalAuthMap.objects.get(external_id=self.USER_EMAIL)\r\n except ExternalAuthMap.DoesNotExist, ex:\r\n self.fail('User did not get properly added to external auth map, exception was {0}'.format(str(ex)))\r\n try:\r\n User.objects.get(email=self.USER_EMAIL)\r\n except ExternalAuthMap.DoesNotExist, ex:\r\n self.fail('User did not get properly added to internal users, exception was {0}'.format(str(ex)))", "def test_login_view_with_iam(self):\n form = {\n 'aws_access_key': 'AWS access key',\n 'aws_secret_access_key': 'AWS secret key',\n }\n response = self.client.post('/login/', form, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.request['PATH_INFO'], '/profile/')", "def test_init(self):\n self.assertTrue(self.new_user.profile.bio == \"Hi!\")", "def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_session_is_accessed(self):\n response = self.client.get(\"/auth_processor_attr_access/\")\n self.assertContains(response, \"Session accessed\")", "def sso_test_create_user(request, idp_slug):\n if settings.SERVER_ENVIRONMENT not in ['staging']:\n raise Http404()\n\n username = request.GET.get('username')\n if username:\n prepare_session_with_sso_username(request, username)\n\n invitation_uuid = request.GET.get('invitation')\n invitation = Invitation.objects.get(uuid=invitation_uuid)\n if invitation:\n prepare_session_for_sso_invitation(request, invitation)\n\n return HttpResponseRedirect(reverse(\"sso_saml_login\", args=(idp_slug,)))", "def test_record(self):\n self.assertEqual(self.record.attrib['id'],\n 'nhc_def_conf_adt_user',\n 'Incorrect ID ')\n self.assertEqual(self.record.attrib['model'],\n 'res.users',\n 'Incorrect model')", "def test_ssl_login_with_signup_lms(self):\r\n\r\n response = external_auth.views.ssl_login(self._create_ssl_request('/'))\r\n\r\n # Response should contain template for signup form, eamap should have user, and internal\r\n # auth should not have a user\r\n self.assertIn('<form role=\"form\" id=\"register-form\" method=\"post\"', response.content)\r\n try:\r\n ExternalAuthMap.objects.get(external_id=self.USER_EMAIL)\r\n except ExternalAuthMap.DoesNotExist, ex:\r\n self.fail('User did not get properly added to external auth map, exception was {0}'.format(str(ex)))\r\n\r\n with self.assertRaises(User.DoesNotExist):\r\n User.objects.get(email=self.USER_EMAIL)", "def test_last_name_attr(self):\n user = User()\n user_details = {\"student_id\": user.id, 
\"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertTrue(hasattr(student, \"last_name\"))\n if models.storage_t == 'db':\n self.assertEqual(student.last_name, None)\n else:\n self.assertEqual(student.last_name, \"\")", "def is_authorized(self, attributes, attribute_mapping):\n LOG.debug('is_authorized() attributes = %s' % attributes)\n LOG.debug('is_authorized() attribute_mapping = %s' % attribute_mapping)\n return True", "def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))", "def test_single_success():\n test_username = \"test_user\"\n\n user = UserFactory.create(username=test_username, is_active=True)\n UserSocialAuthFactory.create(user=user, provider=\"edX\")\n\n assert user.is_active is True\n assert \"retired_email\" not in user.email\n assert UserSocialAuth.objects.filter(user=user).count() == 1\n\n COMMAND.handle(\"retire_users\", users=[test_username])\n\n user.refresh_from_db()\n assert user.is_active is False\n assert \"retired_email\" in user.email\n assert UserSocialAuth.objects.filter(user=user).count() == 0", "def test_user(self):\n\n user = User.query.filter(User.user_fname == \"Smokey\").first()\n self.assertEqual(user.user_fname, \"Smokey\")", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.UserDetails), 1)", "def test_attributes(self):\n creds = NokiaCredentials(access_token=1, token_expiry=2, token_type=3,\n refresh_token=4, user_id=5, client_id=6,\n consumer_secret=7)\n assert hasattr(creds, 'access_token')\n self.assertEqual(creds.access_token, 1)\n assert hasattr(creds, 'token_expiry')\n self.assertEqual(creds.token_expiry, 2)\n assert hasattr(creds, 'token_type')\n self.assertEqual(creds.token_type, 3)\n assert hasattr(creds, 'refresh_token')\n self.assertEqual(creds.refresh_token, 4)\n assert hasattr(creds, 'user_id')\n self.assertEqual(creds.user_id, 5)\n assert hasattr(creds, 'client_id')\n self.assertEqual(creds.client_id, 6)\n assert hasattr(creds, 'consumer_secret')\n self.assertEqual(creds.consumer_secret, 7)", "def test_first_name_attr(self):\n user = User()\n self.assertTrue(hasattr(user, \"first_name\"))\n self.assertEqual(user.first_name, \"\")", "def test_known_user(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n known_user = get_user_model().objects.create_user(\n \"test_auth_backend_user1\",\n email=\"[email protected]\",\n public_username=\"ashley\",\n lti_consumer=consumer,\n lti_remote_user_id=\"ashley\",\n )\n\n user_count = get_user_model().objects.count()\n\n auth_user = self._authenticate(\n {\n \"user_id\": \"643f1625-f240-4a5a-b6eb-89b317807963\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"lis_person_sourcedid\": \"ashley\",\n },\n passport,\n )\n self.assertEqual(known_user, auth_user)\n self.assertEqual(user_count, get_user_model().objects.count())", "def test_student_signup(self):\n post = {'email': '[email protected]', 'first_name': 'Tom',\n 'last_name': 'Student', 'user_type': 'student',\n 'password': '1234'}\n response = self.client.post(self.signup_student_url, post)\n 
self.assertRedirects(response, reverse('home'))\n SchoolUser.objects.get(username='[email protected]')", "def test_create_student_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())", "def test_meta(self):\r\n\r\n class Resource(View, AuthMixin):\r\n\r\n class Meta:\r\n model = 'core.pirate'\r\n authenticators = UserAuthenticator\r\n\r\n self.assertTrue(Resource._meta)\r\n self.assertTrue(Resource._meta.authenticators)\r\n self.assertEqual(Resource._meta.authenticators, (UserAuthenticator,))", "def test_user_auth(self):\n self.new_user.save_login()\n test_user=User(\"trinity\",\"[email protected]\",\"123\")\n test_user.save_login()\n self.assertTrue(self.new_user.users_auth(\"trinity\",\"123\"))", "def assertion_callback(_request, _uri, headers):\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')", "def test_email_attr(self):\n user = User()\n self.assertTrue(hasattr(user, \"email\"))\n self.assertEqual(user.email, \"\")", "def test_Profile(self):\n self.assertEquals(self.user_1.username, 'testuser')\n # self.assertEquals(self.user_1.password, '12345')\n self.assertEquals(self.user_1.email,\n '[email protected]')", "def test_enumerate_saml_roles(self):\n responses.add(responses.POST, 'https://signin.aws.amazon.com/saml', status=200, body=self.aws_signinpage)\n result = self.resolver._enumerate_saml_roles(self.saml, 'https://signin.aws.amazon.com/saml')\n assert_equals(result[0], self.roles[0])", "def test_create_student(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())", "def test_init(self):\n self.assertTrue(self.profile.bio == \"very awesome\")", "def test_shib_login_enrollment(self):\r\n student = UserFactory.create()\r\n extauth = ExternalAuthMap(external_id='[email protected]',\r\n external_email='',\r\n external_domain='shib:https://idp.stanford.edu/',\r\n external_credentials=\"\",\r\n internal_password=\"password\",\r\n user=student)\r\n student.set_password(\"password\")\r\n student.save()\r\n extauth.save()\r\n\r\n course = CourseFactory.create(org='Stanford', number='123', display_name='Shib Only')\r\n course.enrollment_domain = 'shib:https://idp.stanford.edu/'\r\n self.store.update_item(course, '**replace_user**')\r\n\r\n # use django test client for sessions and url processing\r\n # no enrollment before 
trying\r\n self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))\r\n self.client.logout()\r\n request_kwargs = {'path': '/shib-login/',\r\n 'data': {'enrollment_action': 'enroll', 'course_id': course.id.to_deprecated_string(), 'next': '/testredirect'},\r\n 'follow': False,\r\n 'REMOTE_USER': '[email protected]',\r\n 'Shib-Identity-Provider': 'https://idp.stanford.edu/'}\r\n response = self.client.get(**request_kwargs)\r\n # successful login is a redirect to \"/\"\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['location'], 'http://testserver/testredirect')\r\n # now there is enrollment\r\n self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))\r\n\r\n # Clean up and try again with POST (doesn't happen with real production shib, doing this for test coverage)\r\n self.client.logout()\r\n CourseEnrollment.unenroll(student, course.id)\r\n self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))\r\n\r\n response = self.client.post(**request_kwargs)\r\n # successful login is a redirect to \"/\"\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['location'], 'http://testserver/testredirect')\r\n # now there is enrollment\r\n self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))", "def _verify_user_existence(self, user_exists, social_link_exists, user_is_active=None, username=None):\n users = User.objects.filter(username=(username if username else \"test_username\"))\n assert users.exists() == user_exists\n if user_exists:\n assert users[0].is_active == user_is_active\n self.assertEqual(\n UserSocialAuth.objects.filter(user=users[0], provider=self.BACKEND).exists(),\n social_link_exists\n )\n else:\n assert UserSocialAuth.objects.count() == 0", "def test_attributes(self):\n application = self.subStore.findUnique(PrivateApplication)\n self.assertEqual(application.preferredTheme, PREFERRED_THEME)\n self.assertEqual(application.privateKey, PRIVATE_KEY)\n\n website = self.subStore.findUnique(WebSite)\n self.assertIdentical(application.website, website)\n\n customizedPublicPage = self.subStore.findUnique(CustomizedPublicPage)\n self.assertIdentical(\n application.customizedPublicPage, customizedPublicPage)\n\n authenticationApplication = self.subStore.findUnique(\n AuthenticationApplication)\n self.assertIdentical(\n application.authenticationApplication, authenticationApplication)\n\n preferenceAggregator = self.subStore.findUnique(PreferenceAggregator)\n self.assertIdentical(\n application.preferenceAggregator, preferenceAggregator)\n\n defaultPreferenceCollection = self.subStore.findUnique(\n DefaultPreferenceCollection)\n self.assertIdentical(\n application.defaultPreferenceCollection,\n defaultPreferenceCollection)\n\n searchAggregator = self.subStore.findUnique(SearchAggregator)\n self.assertIdentical(application.searchAggregator, searchAggregator)\n\n self.assertIdentical(application.privateIndexPage, None)", "def test_model_metadata_values(self):\n self.assertEqual(self.meta['author'], 'Giang Nguyen, Stefan Dlugolinsky')\n self.assertEqual(self.meta['author-email'], '[email protected], [email protected]')", "def test_read_identity(self):\n pass", "def init_saml_auth(saml_prepared_flask_request):\n return OneLogin_Saml2_Auth(saml_prepared_flask_request, custom_base_path=app.config.get('SAML_PATH', None))", "def test_user_profiles(self):\n\n result = self.client.get(\"/profile/1\")\n self.assertIn(b'In house:',result.data)", "def test_create_identity(self):\n pass", "def 
testPersonIsUser(self):\n member = self.portal.portal_membership.getMemberById('abc123')\n self.failUnless(member,\"%s\" % member)", "def test_list_users(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.get(reverse('user-list'))\n self.assertEqual(json.loads(response.content)['count'], 2)\n\n # Users are ordered alphabetically by email\n first_user = json.loads(response.content)['results'][0]\n second_user = json.loads(response.content)['results'][1]\n self.assertEqual(first_user['email'], self.admin.email)\n\n membership = {\n 'url': 'http://testserver/memberships/' + str(self.membership.id),\n 'id': self.membership.id,\n 'name': 'basic_membership',\n 'available': True,\n 'available_on_product_types': [],\n 'available_on_products': [],\n 'options': [],\n 'picture': None,\n 'price': '50.00',\n 'details': '1-Year student membership',\n 'duration': '365 00:00:00',\n 'available_on_retreat_types': [],\n 'academic_levels': ['http://testserver/academic_levels/' +\n str(self.academic_level.id)]\n }\n\n self.assertEqual(\n remove_translation_fields(second_user['membership']),\n membership\n )\n\n # Check the system doesn't return attributes not expected\n attributes = [\n 'id',\n 'url',\n 'email',\n 'first_name',\n 'last_name',\n 'is_active',\n 'phone',\n 'other_phone',\n 'is_superuser',\n 'is_staff',\n 'university',\n 'last_login',\n 'date_joined',\n 'academic_level',\n 'academic_field',\n 'gender',\n 'language',\n 'birthdate',\n 'groups',\n 'user_permissions',\n 'tickets',\n 'membership',\n 'membership_end',\n 'city',\n 'personnal_restrictions',\n 'academic_program_code',\n 'faculty',\n 'student_number',\n 'volunteer_for_workplace',\n 'hide_newsletter',\n 'is_in_newsletter',\n 'number_of_free_virtual_retreat',\n 'membership_end_notification',\n 'get_number_of_past_tomatoes',\n 'get_number_of_future_tomatoes',\n 'last_acceptation_terms_and_conditions',\n 'tomato_field_matrix',\n 'current_month_tomatoes',\n ]\n for key in first_user.keys():\n self.assertTrue(\n key in attributes,\n 'Attribute \"{0}\" is not expected but is '\n 'returned by the system.'.format(key)\n )\n attributes.remove(key)\n\n # Ensure the system returns all expected attributes\n self.assertTrue(\n len(attributes) == 0,\n 'The system failed to return some '\n 'attributes : {0}'.format(attributes)\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_100(self):\n primary_str, equivalent_set = gmn.app.middleware.session_cert.get_authenticated_subjects(\n self.cert_simple_subject_info_pem\n )\n self.assertEqual(\n primary_str,\n 'CN=Roger Dahl A1779,O=Google,C=US,DC=cilogon,DC=org',\n )\n self.assertListEqual(\n sorted(equivalent_set),\n [\n 'CN=Roger Dahl A1779,O=Google,C=US,DC=cilogon,DC=org',\n 'authenticatedUser',\n 'public',\n 'verifiedUser',\n ],\n )", "def testSessionCreate(self):\n success = False\n attr = None\n\n try:\n attr = self.session.create_visit_attr()\n\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(attr is None)", "def test_sso_user_verified(test_sso_user) -> dict:\n verify_response = SSO_API_CLIENT.patch(\n url=f\"testapi/user-by-email/{test_sso_user['email']}/\",\n data={\"is_verified\": True},\n authenticator=BASIC_AUTHENTICATOR,\n )\n assert verify_response.status_code == HTTP_204_NO_CONTENT\n return test_sso_user", "def test_automatic_default_public_username_role_instructor(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", 
consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"3fd0ff83-a62d-4a12-9716-4d48821ae24f\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"roles\": \"Instructor\",\n },\n passport,\n )\n\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\n \"3fd0ff83-a62d-4a12-9716-4d48821ae24f@consumer\", new_user.username\n )\n self.assertEqual(user_count + 1, get_user_model().objects.count())\n self.assertEqual(\"Educational team\", new_user.public_username)", "def test_userinfo(self):\n self.assertEqual(self.gmail_case.userinfo, None)\n self.assertEqual(self.foo_case.userinfo, 'herp')", "def test_user(self):\n return True", "def test_get_students_for_contact(self):\n pass", "def test_user_instance(self):\n db.session.add(self.user)\n db.session.commit()\n\n user = User.query.filter_by(user_name = \"john_doe\").first()\n users = User.query.all()\n\n self.assertTrue(len(users) > 0)\n self.assertEqual(user.user_name, \"john_doe\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertEqual(user.profile_pic_path, \"app/static/images\")\n self.assertEqual(user.first_name, \"John\")\n self.assertEqual(user.last_name, \"Doe\")\n self.assertEqual(user.headline, \"Food Blogger\")\n self.assertEqual(user.bio, \"Mainly writes on Chinese cuisine\")", "def test_save_multiple_users(self):\n self.new_user.save_user()\n test_user = User('Sophia', 'Robai', '0722857832', '[email protected]', 'val',\n 'password')\n test_user.save_user()\n self.assertEqual(len(User.UserDetails), 2)", "def test_create_new_student_user(self):\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n 'name': \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertEqual(1, len(activation_token))", "def test_values(self):\n user = User()\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.password, \"\")\n self.assertEqual(user.first_name, \"\")\n self.assertEqual(user.last_name, \"\")", "def test_optional_email(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"7275a984-1e77-4084-9fe6-e54d0deba0e7\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_sourcedid\": \"user_without_email\",\n },\n passport,\n )\n\n self.assertEqual(\"user_without_email\", new_user.public_username)\n self.assertEqual(\"\", 
new_user.email)\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"user_without_email@consumer\", new_user.username)\n self.assertEqual(user_count + 1, get_user_model().objects.count())", "def test_creation_profile_1():\n assert tuple_NT[0][0] == LIST_dict[0]['name'], \"Name is not getting stored properly\"", "def test_associates_tweets_with_user(self):\n ingester = Version2TweetIngester()\n ingester.ingest(directory=FIXTURES_DIR_WITH_TWEETS)\n\n tweet = Tweet.objects.first()\n user = User.objects.first()\n\n self.assertEqual(tweet.user, user)", "def test_contains_user(self):\n print('(' + self.test_contains_user.__name__+')',\n self.test_contains_user.__doc__)\n self.assertTrue(self.connection.contains_user(PATIENT_USERNAME))\n self.assertTrue(self.connection.contains_user(DOCTOR_USERNAME))", "def social_associate_and_load_data(backend, details, response, uid, user,\n social_user=None, *args, **kwargs):\n extra_data = backend.extra_data(user, uid, response, details)\n created = False\n if not social_user and user:\n social_user, created = UserSocialAuth.objects.get_or_create(\n user_id=user.id,\n provider=backend.name,\n uid=uid,\n defaults={'extra_data': extra_data})\n\n if not created and extra_data and social_user.extra_data != extra_data:\n social_user.extra_data.update(extra_data)\n social_user.save()\n return {'social_user': social_user}", "def test_serve_user_properties(self):\n pass", "def test_associate_customer_on_signup(self):\n # is this necessary, or is it handled by login logic anyway?\n pass" ]
[ "0.6245322", "0.59301543", "0.5911321", "0.58538973", "0.58124703", "0.57879347", "0.56024635", "0.5550127", "0.55485487", "0.54875433", "0.54671925", "0.54621214", "0.54423463", "0.5395432", "0.5389309", "0.5383121", "0.5311547", "0.5295665", "0.5293737", "0.52753067", "0.5272109", "0.5255254", "0.5239738", "0.5237269", "0.5233854", "0.522482", "0.5209603", "0.52070737", "0.5201741", "0.51940554", "0.51902944", "0.51840675", "0.5175329", "0.51610667", "0.5153775", "0.51451063", "0.5144287", "0.5143778", "0.5139932", "0.5130433", "0.51276755", "0.51205873", "0.510438", "0.50994444", "0.50936276", "0.50874424", "0.5086441", "0.50862277", "0.5084716", "0.5072394", "0.5070918", "0.5064447", "0.5058719", "0.5055321", "0.505158", "0.5050795", "0.5048631", "0.50432795", "0.5040743", "0.50388634", "0.50348085", "0.5034732", "0.5034125", "0.50296", "0.5028577", "0.5027759", "0.5026522", "0.5025826", "0.50237995", "0.5018168", "0.50088334", "0.4998013", "0.49833557", "0.49764004", "0.4973253", "0.4970671", "0.4957561", "0.494913", "0.49485838", "0.49359483", "0.49344942", "0.49323875", "0.49308163", "0.49306318", "0.49296382", "0.49257347", "0.492348", "0.49233794", "0.49216726", "0.49196222", "0.4916887", "0.4913035", "0.49091798", "0.4909168", "0.49086076", "0.49084765", "0.49070787", "0.49034238", "0.49007985", "0.4900547" ]
0.7325418
0
Test SAML login logs with debug mode enabled or not
def test_debug_mode_login(self, debug_mode_enabled): self._configure_testshib_provider(debug_mode=debug_mode_enabled) with patch.object(saml_log, 'info') as mock_log: self._test_login() if debug_mode_enabled: # We expect that test_login() does two full logins, and each attempt generates two # logs - one for the request and one for the response assert mock_log.call_count == 4 expected_next_url = "/dashboard" (msg, action_type, idp_name, request_data, next_url, xml), _kwargs = mock_log.call_args_list[0] assert msg.startswith('SAML login %s') assert action_type == 'request' assert idp_name == self.PROVIDER_IDP_SLUG self.assertDictContainsSubset( {"idp": idp_name, "auth_entry": "login", "next": expected_next_url}, request_data ) assert next_url == expected_next_url assert '<samlp:AuthnRequest' in xml (msg, action_type, idp_name, response_data, next_url, xml), _kwargs = mock_log.call_args_list[1] assert msg.startswith('SAML login %s') assert action_type == 'response' assert idp_name == self.PROVIDER_IDP_SLUG self.assertDictContainsSubset({"RelayState": idp_name}, response_data) assert 'SAMLResponse' in response_data assert next_url == expected_next_url assert '<saml2p:Response' in xml else: assert not mock_log.called
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_logging_running(self):\n tester = app.test_client(self)\n response = tester.get('/login', content_type='html/text')\n self.assertTrue(b'PLEASE LOGIN' in response.data)", "def test_logging(self):\n self._verify_logging()", "def test_successful_login(self):\n pass", "def test_login_required():\n pass", "def test_logPage(self):\n\n # Create a test client\n client = server.app.test_client()\n\n # Use the test client to make requests\n result = client.post('/login', follow_redirects=True, data={'username':'[email protected]',\n 'password':'hadis'})\n\n print(\"it is printing \")\n\n # Compare result.data with assert method\n self.assertIn(b'New Event', result.data)", "def test_setup_logging_debug(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n with self.assertLogs(self.f_logger, LogLevels.DEBUG) as setup_ctx:\n setup_logging(LogLevels.DEBUG)\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertTrue(self.boto3_logger.isEnabledFor(LogLevels.DEBUG))\n self.assertTrue(self.botocore_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertEqual(setup_ctx.output,\n [f'DEBUG:f-cli:Initalized logging for f-cli version {__version__}'])", "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def test_activate_login(self):\r\n pass", "def test_login_page(self):\n r = requests.get(self.url)\n self.assertEqual(r.status_code, 200)\n soup = BeautifulSoup(r.content)\n self.assertEqual(soup.findAll('legend')[0].contents[0], 'Sign In')", "def test_user_login(self):\n self.client.login(username=self.username, password=self.password)\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)", "def test_login_endpoint_not_verbose():\n url = f\"{DEXCOM_BASE_URL}/{DEXCOM_LOGIN_ENDPOINT}\"\n json = {\n \"accountName\": USERNAME,\n \"password\": \"a\",\n \"applicationId\": DEXCOM_APPLICATION_ID,\n }\n r = requests.request(\"post\", url, json=json,)\n assert r.json() == DEFAULT_SESSION_ID", "def assertDebugOnly(self): # FIXME: when at python 3.10+ replace with assertNoLogs\n with self.assertLogs(\"qiskit.quantum_info.synthesis\", \"DEBUG\") as ctx:\n yield\n for i in range(len(ctx.records)):\n self.assertLessEqual(\n ctx.records[i].levelno,\n logging.DEBUG,\n msg=f\"Unexpected logging entry: {ctx.output[i]}\",\n )\n self.assertIn(\"Requested fidelity:\", ctx.records[i].getMessage())", "def test_professor_can_login_to_web_portal(professor):", "def test_aio_can_login_to_web_portal(aio):", "def test_login_session_check(self):\r\n\t\tprint(\"\")\r\n\t\tprint(\"`login_session_check` method tests\")\r\n\t\tprint(\"---------------------\")\r\n\t\tprint(\"Test: `login_session_check: logged in`\")\r\n\t\tpath = 'login'\r\n\t\twith requests_mock.mock() as m:\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 
200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\r\n\t\t\t\t\t\t[\r\n\t\t\t\t\t\t\t{\"FORCE_PWD_CHANGE\":true,\r\n\t\t\t\t\t\t\t\"LAST_ACCT\":1,\r\n\t\t\t\t\t\t\t\"NEXT_PWNED\":null,\r\n\t\t\t\t\t\t\t\"PWD_EXPIRE\":\"2020-07-30\",\r\n\t\t\t\t\t\t\t\"ROOT\":true,\r\n\t\t\t\t\t\t\t\"USER\":\"restuser\",\r\n\t\t\t\t\t\t\t\"USER_ID\":2,\r\n\t\t\t\t\t\t\t\"expired_pwd\":false\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t],\r\n\t\t\t\t\t\"success\":true\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == True\r\n\t\t\tassert session_check[1]['FORCE_PWD_CHANGE'] == True\r\n\t\t\tassert session_check[1]['LAST_ACCT'] == 1\r\n\t\t\tassert session_check[1]['NEXT_PWNED'] == None\r\n\t\t\tassert session_check[1]['ROOT'] == True\r\n\t\t\tassert session_check[1]['USER_ID'] == 2\r\n\t\t\tassert session_check[1]['USER'] == 'restuser'\r\n\t\t\tassert session_check[1]['expired_pwd'] == False\r\n\t\t\tprint(\"Passed!!!\")\r\n\t\t\tprint(\"Test: `login_session_check: not logged in`\")\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\t[],\r\n\t\t\t\t\t\"success\":false\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == False\r\n\t\t\tassert not session_check[1] # dictionary should be empty\r\n\t\tprint(\"Passed!!!\")", "def test_auth_xml(self):\n\n config = get_config()\n\n if config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey'),\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp\n )\n\n self.assertNotEqual(\n response,\n None,\n \"Authentication with the configured settings \"\n \"was not successful\"\n )", "def test_admin_can_login_to_web_portal(admin):", "def testGetLogAuth(self):\n response = self._get('inventory/log/')\n self.assertEquals(response.status_code, 401)\n\n response = self._get('inventory/log/', username=\"testuser\",\n password=\"password\")\n self.assertEquals(response.status_code, 200)", "def debugging_tests():\n logging.warning(\"Running debugging tests...\")\n pass", "def test_level_debug(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.DEBUG)), \":detective: **test**\")", "def test_login(self):\n print(\"Test Login\")\n self.mock_api.return_value = LOGIN_RESPONSE\n self.manager.enabled = False\n assert self.manager.login()\n all_kwargs = parse_args(self.mock_api)\n assert assert_test(self.manager.login, all_kwargs, None,\n self.write_api, self.overwrite)", "def test_show_login_page(self):\n with self.client as c:\n\n res = c.get(\"/login\")\n html = res.get_data(as_text=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Don't have an account?\", html)", "def test_login_failure(self):\n self.client.login(username=self.username, password='AWrongPassword')\n # 2: creation and login\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)", "def test_valid_login(self):\n self.assertTrue(self.session.authenticate('[email 
protected]', 'supersecret'))", "def test_user_login(self):\n\n for i in range(0, len(self.users)):\n\n # Gets user\n user = self.users[i]\n\n # Creates payload\n event = {\n \"username\": user['username'],\n \"pwd\": user['pwd']\n }\n\n # Invokes\n response = handler.user_login(event=event, context=None)\n\n # Validates response\n body_dict = json.loads(response['body'])\n apidataset_dict = body_dict['apidataset']\n self.assertEqual(response['statusCode'], 200)\n self.assertEqual (\n apidataset_dict['displayName'],\n user['nameFirst'] + ' ' + user['nameLast']\n )\n self.assertIn('sessionToken', apidataset_dict)", "def test_login(\n config,\n):\n with requests_mock.Mocker() as m:\n sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n sms._login_url,\n status_code=302,\n # pylint: disable=protected-access\n headers={\"location\": sms._kontomanager},\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._kontomanager,\n status_code=200,\n text=\"test...\" + LOGIN + \"</a>\",\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._logout_url,\n status_code=200,\n )\n # pylint: disable=protected-access\n session, request = sms._login(requests.Session(), get_request=True)\n # pylint: disable=protected-access\n session.get(sms._logout_url)\n # pylint: disable=protected-access\n assert sms._logindata[\"login_rufnummer\"][-7:] + \"</a>\" in request.text\n # pylint: disable=protected-access\n assert request.url == sms._kontomanager", "def testLogin(self):\n mt = self.portal.portal_membership\n self.logout()\n self.login('abc123')\n member = mt.getAuthenticatedMember()\n self.failUnlessEqual(member.id, 'abc123', msg=\"incorrect user logged in: %s\" % member)", "def assert_login_response_before_pipeline_looks_correct(self, response):\r\n self.assertEqual(200, response.status_code)\r\n self.assertIn('Sign in with ' + self.PROVIDER_CLASS.NAME, response.content)\r\n self.assert_javascript_would_submit_login_form(False, response)\r\n self.assert_signin_button_looks_functional(response.content, pipeline.AUTH_ENTRY_LOGIN)", "def test_get_all_event_with_login(self):\n self.client.login(email='[email protected]', password='top_secret')\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)", "def test_login(self):\n\n with self.client as c:\n result = c.post('/login',\n data={'email': '[email protected]', 'password': 'abc'},\n follow_redirects=True\n )\n self.assertEqual(session['user_id'], 1)\n self.assertIn(\"You are logged in\", result.data)\n\n print \"DONE WITH LOGIN CHECK\"", "def test_list_login():\n assert_redirect_to_login('/')\n assert_not_redirect_to_login('/')", "def test_login_errors(self):\n login = '/login?user_name=nobody&password=wrong&login=Login'\n response = self.app.get(login)\n assert \"<title>Login</title>\" in response\n assert \"session cookies\" in response\n cookie = ', '.join(map(str, response.cookies_set.values()))\n response = self.app.get(login, headers=dict(Cookie=cookie))\n assert \"<title>Login</title>\" in response\n assert \"credentials\" in response\n assert \"not correct\" in response", "def test_shred_login():\n assert_redirect_to_login('/shred/')\n assert_not_redirect_to_login('/shred/')", "def logged():\n if session.login==1:\n return True\n else:\n return False", "def test_isolation(self):\n\n root = logging.getLogger('')\n nose = logging.getLogger('nose')\n\n config = Config()\n config.configureLogging()\n \n root.setLevel(logging.DEBUG)\n 
self.assertEqual(nose.level, logging.WARN)", "def test_regular_user_login(self):\n self.login(\"user\", \"user\")\n self.should_see(\"This is your profile, user.\")", "def test_auth_failure_xml(self):\n\n config = get_config()\n\n if config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey') + \"1234\",\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp\n )\n\n self.assertEqual(\n response,\n None,\n \"Authentication did not return 'None', but %s instead.\" % (\n response\n )\n )", "def test_login(self, mock):\n _setup_responses(mock)\n api = LiveStreamApi(\"user\", \"pass\")\n\n api.login()\n\n with self.subTest(\"stores the token on the api object\"):\n self.assertEqual(api._token, \"ffffffffffffffffffffffffffffffffffffffff\")\n\n with self.subTest(\"stores ssesyranac cookie on the api object\"):\n self.assertEqual(api._ssesyranac, \"ssesyranac\")", "def test_loginpage_view(self):\n response = self.client.get(url_for('login'))\n self.assertEqual(response.status_code, 200)", "def test_get_security_log_report_in_detail(self, mock_send_cli_cmd, mock_sleep):\n self.log.display_title(title=self.tool.get_current_function_name())\n self.log.step_num = 0\n msg = \"get in detail result is existing\"\n response = \"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_report_in_detail(\n device=None,\n type=\"session-close\",\n category=\"1\",\n source_address=\"192.168.100.103\",\n source_zone=\"source_zone1\",\n source_interface=\"1\",\n destination_address=\"192.168.200.103\",\n threat_severity=\"1\",\n count=\"1\",\n reason=\"1\",\n service=\"1\",\n url=\"1\",\n role=\"1\",\n profile=\"1\",\n protocol=\"1\",\n policy_name=\"1\",\n rule_name=\"1\",\n nested_application=\"1\",\n operation=\"1\",\n application=\"1\",\n 
user=\"1\",\n source_name=\"1\",\n event_type=\"1\",\n start_from=\"1\",\n start_time=\"1\",\n stop_time=\"1\",\n check_content=\"cnrd-ngsrxqavm40\",\n option=\"logical-systems LSYS1\",\n exist=\"yes\",\n )\n self.assertTrue(isinstance(result, str))\n\n msg = \"get in detail result with no parameter\"\n response = \"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n result = self.ins.get_security_log_report_in_detail(\n device=None,\n check_content=\"cnrd-ngsrxqavm40\",\n exist=\"yes\",\n )\n self.assertTrue(isinstance(result, str))\n\n msg = \"get in detail result exist is none\"\n response = \"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_report_in_detail(\n device=None,\n type=\"session-close\",\n check_content=\"cnrd-ngsrxqavm40\",\n )\n self.assertFalse(result)\n\n msg = \"get in detail result is none\"\n response = 
\"\"\"\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_report_in_detail(\n device=None,\n type=\"session-close\",\n check_content=\"cnrd-ngsrxqavm40\",\n )\n self.assertFalse(result)\n\n msg = \"get in detail result exist is no with no check_content\"\n response = \"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_report_in_detail(\n device=None,\n type=\"session-close\",\n check_content=\"1234567\",\n exist=\"no\"\n )\n self.assertTrue(result)\n\n\n msg = \"get in detail result check_content is wrong\"\n mock_send_cli_cmd.side_effect = (\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" 
elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n )\n result = self.ins.get_security_log_report_in_detail(\n device=None,\n check_content=\"cnrd-ngsrxqavm40\",\n exist=\"yes\",\n )\n self.assertTrue(isinstance(result, str))\n\n\n\n msg = \"get in detail result check_content is wrong\"\n mock_send_cli_cmd.side_effect = (\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" 
service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n )\n result = self.ins.get_security_log_report_in_detail(\n device=None,\n check_content=\"1234567\",\n exist=\"yes\",\n )\n self.assertFalse(result)\n\n\n msg = \"get in detail result check_content is wrong\"\n mock_send_cli_cmd.side_effect = (\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" 
nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat 
rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n )\n result = self.ins.get_security_log_report_in_detail(\n device=None,\n check_content=\"cnrd-ngsrxqavm40\",\n exist=\"no\",\n )\n self.assertFalse(result)\n\n\n msg = \"get in detail result exist is no\"\n mock_send_cli_cmd.side_effect = (\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 
cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-log-report-in-detail>\n <entry>\n </entry>\n </security-log-report-in-detail>\n \"\"\"),\n )\n\n #mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_report_in_detail(\n device=None,\n type=\"session-close\",\n source_address=\"192.168.100.103\",\n check_content=\"cnrd-ngsrxqavm40\",\n exist=\"no\",\n )\n self.assertTrue(result)", "def test_correct_login(self):\n self.app.post('/register', data=self.user_reg_details)\n res = self.app.post('/login', data=self.user_login_details)\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Successfully logged in\", str(res.data))", "def test_check_security_log_content(self, mock_send_cli_cmd):\n self.log.display_title(title=self.tool.get_current_function_name())\n self.log.step_num = 0\n msg = \"get in detail result is existing\"\n response = '<14>1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"20005\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"20005\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]'\n # mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = 
self.ins.check_security_log_content(\n device=None,\n content=response,\n count=\"1\",\n check_count=1,\n source_address=\"192.168.100.103\",\n destination_address=\"192.168.200.103\",\n service=\"Medium\",\n application=\"application4\",\n nested_application=\"nested_application4\",\n operation=\"or\",\n username=\"user4\",\n event_type=\"RT_FLOW_SESSION_CLOSE\",\n start_from=\"1\",\n start_from_content=\"RT_FLOW_SESSION_CLOSE\",\n start_time=\"2017-02-22T14:15:30\",\n stop_time=\"2017-02-22T14:15:45\",\n device_name=\"cnrd-ngsrxqavm40\",\n check_content=\"cnrd-ngsrxqavm40\",\n )\n self.assertTrue(result)\n\n response = '<14>1 2015-11-23T14:06:00.715+08:00 bjsolar RT_IDP - IDP_ATTACK_LOG_EVENT [[email protected] epoch-time=\"1448258760\" message-type=\"SIG\" source-address=\"25.0.0.254\" source-port=\"2\" destination-address=\"192.1.1.2\" destination-port=\"44386\" protocol-name=\"ICMP\" service-name=\"SERVICE_IDP\" application-name=\"NONE\" rule-name=\"1\" rulebase-name=\"IPS\" policy-name=\"idp-policy1\" export-id=\"9025\" repeat-count=\"0\" action=\"NONE\" threat-severity=\"INFO\" attack-name=\"ICMP:INFO:ECHO-REPLY\" nat-source-address=\"0.0.0.0\" nat-source-port=\"0\" nat-destination-address=\"0.0.0.0\" nat-destination-port=\"0\" elapsed-time=\"0\" inbound-bytes=\"0\" outbound-bytes=\"0\" inbound-packets=\"0\" outbound-packets=\"0\" source-zone-name=\"cppm\" source-interface-name=\"ge-11/0/4.0\" destination-zone-name=\"trust\" destination-interface-name=\"ge-11/0/1.0\" packet-log-id=\"0\" alert=\"no\" username=\"N/A\" roles=\"N/A\" message=\"-\"]'\n # mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.check_security_log_content(\n device=None,\n content=response,\n count=\"1\",\n check_count=1,\n source_address=\"25.0.0.254\",\n destination_address=\"192.1.1.2\",\n threat_severity=\"INFO\",\n protocol_name=\"ICMP\",\n rule_name=\"1\",\n application_name=\"NONE\",\n event_type=\"IDP_ATTACK_LOG_EVENT\",\n start_from=\"1\",\n start_from_content=\"IDP_ATTACK_LOG_EVENT\",\n start_time=\"2015-11-23T14:06:00\",\n stop_time=\"2015-11-23T14:06:00\",\n device_name=\"bjsolar\",\n check_content=\"bjsolar\",\n )\n self.assertTrue(result)\n\n msg = \"get in detail result is existing\"\n response = '<14>1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"20005\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"20005\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]'\n # 
mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.check_security_log_content(\n device=None,\n content=response,\n count=\"3\",\n check_count=1,\n category=\"1\",\n source_address=\"192222\",\n destination_address=\"12222\",\n threat_severity=\"1\",\n reason=\"1\",\n service=\"1\",\n url=\"1\",\n roles=\"1\",\n profile=\"1\",\n protocol_name=\"1\",\n rule_name=\"1\",\n nested_application=\"1\",\n operation=\"1\",\n application=\"1\",\n application_name=\"1\",\n username=\"1\",\n source_name=\"1\",\n event_type=\"aaaaaN_CLOSE\",\n start_from=\"1\",\n start_from_content=\"333\",\n start_time=\"2017-02-22T14:16:30\",\n stop_time=\"2017-02-22T14:16:45\",\n device_name=\"cnrd-ngsrxqavm40\",\n check_content=\"5555\",\n )\n self.assertFalse(result)\n\n msg = \"get in detail result with no parameter\"\n response = '<14>1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"20005\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"20005\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]'\n # mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n result = self.ins.check_security_log_content(\n device=None,\n content=response,\n )\n self.assertTrue(result)\n\n msg = \"get in detail result without check_content\"\n response = \"\"\"\n <12>1 2017-05-11T13:45:24 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - WEBFILTER_URL_BLOCKED [[email protected] source-address=\"1.16.16.16\" source-port=\"36733\" destination-address=\"2.16.16.16\" destination-port=\"80\" session-id=\"1\" category=\"N/A\" reason=\"TESTSPAM\" profile=\"PROFILE\" url=\"http://www.viruslist.com/en/search?VN=EICAR-Test-File\" obj=\"N/A\" username=\"N/A\" roles=\"N/A\"]\n <14>1 2017-05-11T13:45:24 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - WEBFILTER_URL_PERMITTED [[email protected] source-address=\"1.16.16.16\" source-port=\"36733\" destination-address=\"2.16.16.16\" destination-port=\"80\" session-id=\"1\" category=\"N/A\" reason=\"TESTSPAM\" profile=\"PROFILE\" url=\"http://www.viruslist.com/en/search?VN=EICAR-Test-File\" obj=\"N/A\" username=\"N/A\" roles=\"N/A\"]\n \"\"\"\n # mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n result = self.ins.check_security_log_content(\n device=None,\n check_count=\"2\",\n content=response,\n category=\"N/A\",\n reason=\"TESTSPAM\",\n url=\"http://www.viruslist.com/en/search?VN=EICAR-Test-File\",\n roles=\"N/A\",\n profile=\"PROFILE\",\n )\n 
self.assertTrue(result)\n\n msg = \"get in detail result with source-name\"\n response = \"\"\"\n <14>1 2017-05-11T13:45:24 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - ANTISPAM_SPAM_DETECTED_MT [[email protected] source-name=\"[email protected]\" source-address=\"1.16.16.16\" profile-name=\"PROFILE\" action=\"BLOCKED\" reason=\"TESTSPAM\" username=\"N/A\" roles=\"N/A\"]\n \"\"\"\n # mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n result = self.ins.check_security_log_content(\n device=None,\n check_count=\"1\",\n content=response,\n source_name=\"[email protected]\",\n )\n self.assertTrue(result)", "def test_login_view(self):\n response = self.client.get(url_for('login'))\n self.assertEqual(response.status_code, 200)", "def test_get_login(self):\n login = Login(self.client, 123)\n\n self.assertEqual(login.id, 123)\n self.assertEqual(login.ip, \"192.0.2.0\")\n self.assertEqual(login.restricted, True)\n self.assertEqual(login.status, \"successful\")\n self.assertEqual(login.username, \"test-user\")", "def test_login(self):\n res = self.client.get(\"/login\")\n data = res.data.decode(\"utf-8\")\n assert res.status == \"200 OK\"\n assert \"Already a member!\" in data", "def test_login_logout(self):\n '''\n rv = self.login(flaskr.app.config['USERNAME'],\n flaskr.app.config['PASSWORD'])\n assert 'You were logged in' in rv.data\n rv = self.logout()\n assert 'You were logged out' in rv.data\n rv = self.login(flaskr.app.config['USERNAME'] + 'x',\n flaskr.app.config['PASSWORD'])\n assert 'Invalid username' in rv.data\n rv = self.login(flaskr.app.config['USERNAME'],\n flaskr.app.config['PASSWORD'] + 'x')\n assert 'Invalid password' in rv.data\n '''\n pass", "def details_not_matching():\n print(\"login details don't match.\")", "def test_emotion_analysis_route_get_has_200_logged_in(self):\n self.client.login(username='dan', password='password')\n response = self.client.get(reverse_lazy('emotion_analysis'))\n self.assertEqual(response.status_code, 200)", "def test_middleware_loads(self):\n self.client.get(\"/__debug__\")", "def test_access_with_debug(self):\n review_request = self.create_review_request(publish=True)\n review = self.create_review(review_request, publish=True)\n\n with self.settings(DEBUG=True):\n response = self.client.get(\n local_site_reverse(\n 'preview-review-email',\n kwargs={\n 'review_request_id': review_request.pk,\n 'review_id': review.pk,\n 'message_format': 'text',\n }))\n\n self.assertEqual(response.status_code, 200)", "def test_adtls(self):\n for domain in config.ADW2K12_DOMAINS:\n principal = '%s@%s' % (config.ADW2k12_USER1, domain)\n\n testflow.step(\"Login as user %s\", config.ADW2k12_USER1)\n users.loginAsUser(\n principal,\n self.conf['authn_name'],\n config.ADW2k12_USER_PASSWORD,\n True,\n )\n\n testflow.step(\n \"Testing connection with user %s\", config.ADW2k12_USER1\n )\n assert common.connectionTest(), \"User %s can't login.\" % principal", "def samladsv3(self):\n try:\n # Get the federated credentials from the user\n print(\"[-] Get authentication token\")\n print(\"Email:\", end=' ')\n username = input()\n password = getpass.getpass()\n print('')\n\n # Initiate session handler\n session = requests.Session()\n\n # Programmatically get the SAML assertion\n # Opens the initial IdP url and follows all of the HTTP302 redirects, and\n # gets the resulting login page\n formresponse = session.get(idpentryurl, verify=sslverification)\n # Capture the idpauthformsubmiturl, which is the final url after all the 302s\n idpauthformsubmiturl = 
formresponse.url\n\n # Parse the response and extract all the necessary values\n # in order to build a dictionary of all of the form values the IdP expects\n formsoup = BeautifulSoup(formresponse.text, \"html.parser\")\n payload = {}\n\n for inputtag in formsoup.find_all(re.compile('(INPUT|input)')):\n name = inputtag.get('name','')\n value = inputtag.get('value','')\n if \"user\" in name.lower():\n #Make an educated guess that this is the right field for the username\n payload[name] = username\n elif \"email\" in name.lower():\n #Some IdPs also label the username field as 'email'\n payload[name] = username\n elif \"pass\" in name.lower():\n #Make an educated guess that this is the right field for the password\n payload[name] = password\n else:\n #Simply populate the parameter with the existing value (picks up hidden fields in the login form)\n payload[name] = value\n\n # Debug the parameter payload if needed\n # Use with caution since this will print sensitive output to the screen\n #print(payload)\n\n # Some IdPs don't explicitly set a form action, but if one is set we should\n # build the idpauthformsubmiturl by combining the scheme and hostname\n # from the entry url with the form action target\n # If the action tag doesn't exist, we just stick with the\n # idpauthformsubmiturl above\n for inputtag in formsoup.find_all(re.compile('(FORM|form)')):\n action = inputtag.get('action')\n loginid = inputtag.get('id')\n if (action and loginid == \"loginForm\"):\n parsedurl = urlparse(idpentryurl)\n idpauthformsubmiturl = parsedurl.scheme + \"://\" + parsedurl.netloc + action\n\n # Performs the submission of the IdP login form with the above post data\n response = session.post(\n idpauthformsubmiturl, data=payload, verify=sslverification)\n\n # Debug the response if needed\n #print(response.text)\n\n # Overwrite and delete the credential variables, just for safety\n username = '##############################################'\n password = '##############################################'\n del username\n del password\n\n # Decode the response and extract the SAML assertion\n soup = BeautifulSoup(response.text, \"html.parser\")\n assertion = ''\n\n # Look for the SAMLResponse attribute of the input tag (determined by\n # analyzing the debug print lines above)\n for inputtag in soup.find_all('input'):\n if(inputtag.get('name') == 'SAMLResponse'):\n #print(inputtag.get('value'))\n assertion = inputtag.get('value')\n\n # Better error handling is required for production use.\n if (assertion == ''):\n #TODO: Insert valid error checking/handling\n print('Response did not contain a valid SAML assertion')\n sys.exit(0)\n\n # Debug only\n #print(base64.b64decode(assertion))\n\n # Parse the returned assertion and extract the authorized roles\n awsroles = []\n root = ET.fromstring(base64.b64decode(assertion))\n for saml2attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):\n if (saml2attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role'):\n for saml2attributevalue in saml2attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):\n awsroles.append(saml2attributevalue.text)\n\n # Note the format of the attribute value should be role_arn,principal_arn\n # but lots of blogs list it as principal_arn,role_arn so let's reverse\n # them if needed\n for awsrole in awsroles:\n chunks = awsrole.split(',')\n if'saml-provider' in chunks[0]:\n newawsrole = chunks[1] + ',' + chunks[0]\n index = awsroles.index(awsrole)\n awsroles.insert(index, newawsrole)\n 
awsroles.remove(awsrole)\n\n # If I have more than one role, ask the user which one they want,\n # otherwise just proceed\n print(\"\")\n if len(awsroles) > 1:\n i = 0\n print(\"Please choose the role you would like to assume:\")\n for awsrole in awsroles:\n print('[', i, ']: ', awsrole.split(',')[0])\n i += 1\n print(\"Selection: \", end=' ')\n selectedroleindex = input()\n\n # Basic sanity check of input\n if int(selectedroleindex) > (len(awsroles) - 1):\n print('You selected an invalid role index, please try again')\n sys.exit(0)\n\n role_arn = awsroles[int(selectedroleindex)].split(',')[0]\n principal_arn = awsroles[int(selectedroleindex)].split(',')[1]\n else:\n role_arn = awsroles[0].split(',')[0]\n principal_arn = awsroles[0].split(',')[1]\n\n # Use the assertion to get an AWS STS token using Assume Role with SAML\n conn = boto3.client('sts', region_name=region)\n token = conn.assume_role_with_saml(RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=assertion)\n\n # Read in the existing config file\n config = configparser.RawConfigParser()\n config.read(credentials)\n\n # Put the credentials into a saml specific section instead of clobbering\n # the default credentials\n if not config.has_section('saml'):\n config.add_section('saml')\n\n config['saml']['output'] = outputformat\n config['saml']['region'] = region\n config['saml']['aws_access_key_id'] = token['Credentials']['AccessKeyId']\n config['saml']['aws_secret_access_key'] = token['Credentials']['SecretAccessKey']\n config['saml']['aws_session_token'] = token['Credentials']['SessionToken']\n\n # Write the updated config file\n with open(credentials, 'w+') as configfile:\n config.write(configfile)\n\n # Give the user some basic info as to what has just happened\n print('\\n\\n----------------------------------------------------------------')\n print('Your new access key pair has been stored in the AWS configuration file {0} under the saml profile.'.format(credentials))\n print('Note that it will expire at {0}.'.format(token['Credentials']['Expiration'].astimezone(get_localzone())))\n print('After this time, you may safely rerun this script to refresh your access key pair.')\n print('To use this credential, call the AWS CLI with the --profile option (e.g. aws --profile saml ec2 describe-instances).')\n print('----------------------------------------------------------------\\n\\n')\n\n return samladsv3\n\n except Exception as e:\n print(\"Error while getting authentication token. 
%s\" % e)", "def test_login():\n My.search_merchant_page(driver, My.Testing_Env_EN)\n validate_login()\n print('----------')\n My.search_merchant_page(driver, My.Testing_Env_FR)\n validate_login()\n driver.quit()", "def test_login_successful():\n user_info = {\"password\": \"abc123\", \"username\": \"0450539776\",}\n response = requests.get(\"http://127.0.0.1:7700/login\", params=user_info)\n assert(response.status_code == 200)\n assert(\"token\" in response.json())", "def test_login(self):\r\n\t\tprint(\"\")\r\n\t\tprint('`login` method tests')\r\n\t\tprint('--------------------')\r\n\t\tpath = 'login'\r\n\t\ttestdata =\t{\r\n\t\t\t'test' : 'Valid credentials',\r\n\t\t\t'login': 'restuser',\r\n\t\t\t'password': 'puppies1234567890',\r\n\t\t\t'responsecode': 200,\r\n\t\t\t'responsetext' : \"\"\"{\r\n\t\t\t\t\"rows\":\r\n\t\t\t\t\t[\r\n\t\t\t\t\t\t{\"FORCE_PWD_CHANGE\":false,\r\n\t\t\t\t\t\t\"LAST_ACCT\":1,\r\n\t\t\t\t\t\t\"NEXT_PWNED\":null,\r\n\t\t\t\t\t\t\"PWD_EXPIRE\":\"2020-10-23\",\r\n\t\t\t\t\t\t\"ROOT\":true,\r\n\t\t\t\t\t\t\"USER\":\"restuser\"\r\n\t\t\t\t\t\t,\"USER_ID\":2,\r\n\t\t\t\t\t\t\"expired_pwd\":false\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t],\r\n\t\t\t\t\"success\":true\r\n\t\t\t}\"\"\"\r\n\t\t}\r\n\t\tprint(f\"Test: `{testdata['test']}`\")\r\n\t\twith requests_mock.mock() as m:\r\n\t\t\tm.post(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = testdata['responsecode'],\r\n\t\t\t\ttext = testdata['responsetext']\r\n\t\t\t)\r\n\t\t\tassert sdk.login(testdata['login'], testdata['password']) == True\r\n\t\tprint('Passed!!!')\r\n\t\ttestdata = [\r\n\t\t\t{\r\n\t\t\t\t'test': 'Invalid Credentials',\r\n\t\t\t\t'login': 'rest123',\r\n\t\t\t\t'password': 'puppies7890',\r\n\t\t\t\t'responsecode': 400,\r\n\t\t\t\t'exception': \"Login error: `invalid credentials`\",\r\n\t\t\t\t'responsetext' : \"\"\"{\r\n\t\t\t\t\t\"error\":\"invalid credentials\",\r\n\t\t\t\t\t\"success\":false\r\n\t\t\t\t}\"\"\"\r\n\t\t\t},\r\n\t\t\t{\r\n\t\t\t\t'test': 'Expired Password',\r\n\t\t\t\t'login': 'rest123',\r\n\t\t\t\t'password': 'puppies7890',\r\n\t\t\t\t'responsecode': 200,\r\n\t\t\t\t'exception': f'Login error: `Password has expired. Please log in with a browser to {host} to change your password`',\r\n\t\t\t\t'responsetext' : \"\"\"{\r\n\t\t\t\t\t\"rows\":\r\n\t\t\t\t\t\t[\r\n\t\t\t\t\t\t\t{\"FORCE_PWD_CHANGE\":false,\r\n\t\t\t\t\t\t\t\"LAST_ACCT\":1,\r\n\t\t\t\t\t\t\t\"NEXT_PWNED\":null,\r\n\t\t\t\t\t\t\t\"PWD_EXPIRE\":\"2020-07-25\",\r\n\t\t\t\t\t\t\t\"ROOT\":true,\r\n\t\t\t\t\t\t\t\"USER\":\"restuser\"\r\n\t\t\t\t\t\t\t,\"USER_ID\":2,\r\n\t\t\t\t\t\t\t\"expired_pwd\":true\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t],\r\n\t\t\t\t\t\"success\":true\r\n\t\t\t\t}\"\"\"\r\n\t\t\t},\r\n\t\t\t{\r\n\t\t\t\t'test': 'Forced Password Reset',\r\n\t\t\t\t'login': 'rest123',\r\n\t\t\t\t'password': 'puppies7890',\r\n\t\t\t\t'responsecode': 200,\r\n\t\t\t\t'exception': f'Login error: `Password must be changed. 
Please log in with a browser to {host} to change your password`',\r\n\t\t\t\t'responsetext' : \"\"\"{\r\n\t\t\t\t\t\"rows\":\r\n\t\t\t\t\t\t[\r\n\t\t\t\t\t\t\t{\"FORCE_PWD_CHANGE\":true,\r\n\t\t\t\t\t\t\t\"LAST_ACCT\":1,\r\n\t\t\t\t\t\t\t\"NEXT_PWNED\":null,\r\n\t\t\t\t\t\t\t\"PWD_EXPIRE\":\"2020-07-30\",\r\n\t\t\t\t\t\t\t\"ROOT\":true,\r\n\t\t\t\t\t\t\t\"USER\":\"restuser\"\r\n\t\t\t\t\t\t\t,\"USER_ID\":2,\r\n\t\t\t\t\t\t\t\"expired_pwd\":false\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t],\r\n\t\t\t\t\t\"success\":true\r\n\t\t\t\t}\"\"\"\r\n\t\t\t},\r\n\t\t\t{\r\n\t\t\t\t'test': 'IP Address Control',\r\n\t\t\t\t'login': 'rest123',\r\n\t\t\t\t'password': 'puppies7890',\r\n\t\t\t\t'responsecode': 400,\r\n\t\t\t\t'exception': f'Login error: `user cannot access from this ip address`',\r\n\t\t\t\t'responsetext' : \"\"\"{\r\n\t\t\t\t\t\"error\":\"user cannot access from this ip address\",\r\n\t\t\t\t\t\"success\":false\r\n\t\t\t\t}\"\"\"\r\n\t\t\t}\r\n\t\t]\r\n\t\tfor row in testdata:\r\n\t\t\tprint(f\"Test: `{row['test']}`\")\r\n\t\t\twith requests_mock.mock() as m:\r\n\t\t\t\tm.post(\r\n\t\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\t\tstatus_code = row['responsecode'],\r\n\t\t\t\t\ttext = row['responsetext']\r\n\t\t\t\t)\r\n\t\t\t\twith pytest.raises(GravAuthError) as e:\r\n\t\t\t\t\tassert sdk.login(row['login'], row['password'])\r\n\t\t\t\tassert f'{e.value}' == row['exception']\r\n\t\t\tprint('Passed!!!')", "def test_session_is_accessed(self):\n response = self.client.get(\"/auth_processor_attr_access/\")\n self.assertContains(response, \"Session accessed\")", "def test_verification_status_visible(self):\r\n self.client.login(username=\"jack\", password=\"test\")\r\n self.check_verification_status_on('verified', 'You\\'re enrolled as a verified student')\r\n self.check_verification_status_on('honor', 'You\\'re enrolled as an honor code student')\r\n self.check_verification_status_on('audit', 'You\\'re auditing this course')", "def test_setup_logging_info(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n setup_logging()\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertFalse(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertFalse(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertFalse(self.boto3_logger.isEnabledFor(LogLevels.INFO))\n self.assertFalse(self.botocore_logger.isEnabledFor(LogLevels.INFO))", "def _assert_shib_login_is_logged(self, audit_log_call, remote_user):\r\n method_name, args, _kwargs = audit_log_call\r\n self.assertEquals(method_name, 'info')\r\n self.assertEquals(len(args), 1)\r\n self.assertIn(u'logged in via Shibboleth', args[0])\r\n self.assertIn(remote_user, args[0])", "def sso_debug_user_data(request, idp_slug):\n if settings.SERVER_ENVIRONMENT not in ['staging']:\n raise Http404()\n return HttpResponse(json.dumps({\n \"samlUserdata\": request.session.get('samlUserdata'),\n \"samlNameId\": request.session.get('samlNameId'),\n \"samlNameIdFormat\": request.session.get('samlNameIdFormat'),\n 
\"samlNameIdNameQualifier\": request.session.get('samlNameIdNameQualifier'),\n \"samlNameIdSPNameQualifier\": request.session.get('samlNameIdSPNameQualifier'),\n \"samlSessionIndex\": request.session.get('samlSessionIndex'),\n }), 'text/json')", "def test_login_page_loads(self):\n response = self.client.get('/users/login')\n self.assertIn(b'Please login', response.data)", "def test_validate_login_info(self):\n assert(PatientService().validate_login_info(self.valid_health_card_nb, self.password) > 0)\n assert(-1 == PatientService().validate_login_info(self.valid_health_card_nb, self.password + \"INVALID\"))", "def full_text(env_config):\n # Setup : start login\n url = env_config['host']['url'] + env_config['Login_url']['lurl']\n loginparams = {'username': env_config['Login_data']['username'], 'password': env_config['Login_data']['password'],\n 'vcode': env_config['Login_data']['vcode']}\n\n # session = requests.sessions()\n r = requests.post(url, params=loginparams)\n # print(r.url)\n cookie = r.cookies.get_dict()\n result_login = r.json()\n jsessionid = dict({'JSESSIONID': cookie['JSESSIONID']})\n assert str(result_login['success']) == 'True'\n\n yield jsessionid # 此处开始执行测试用例且传递setup之前的数据到test\n\n # Teardown : over\n pytest.exit('测试结束!')", "def test_login_responser(self):\n response = self.client.get(reverse('cookbook:login'))\n self.assertContains(response, \"Sign in\")", "def debug():\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"", "def test_login_view(self):\n response = self.client.get(url_for('users.login'))\n self.assertEqual(response.status_code, 200)", "def test_login(self):\n self.facebook_page.login()\n self.assertIsNotNone(self.facebook_page.webdriver.find_element_by_name('requests'))", "def test_xml_response_parser(mock_success_login):\n test = soap_login(USERNAME, PASSWORD, TOKEN)\n expected = (\"sessionId\", \"www.salesforce.com\")\n\n assert test == expected", "def test_patient_login(self):\n\n data = {\"email\": \"[email protected]\", \"password\": \"password\"}\n result = self.client.post(\"/patient-login\", data=data,\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Patient Dashboard\", result.data)\n\n data = {\"email\": \"[email protected]\", \"password\": \"pass\"}\n result = self.client.post(\"/patient-login\", data=data,\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Incorrect password\", result.data)\n\n data = {\"email\": \"[email protected]\", \"password\": \"password\"}\n result = self.client.post(\"/patient-login\", data=data,\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"No account with\", result.data)", "def test_login_title(self):\n response = self.app.get('/login')\n assert \"<title>Login</title>\" in response\n assert \"Please log in.\" in response\n assert \"session cookies\" not in response\n assert \"credentials\" not in response\n assert \"not correct\" not in response", "def test_need_login(self):\n urls = [\n reverse(\"survey-detail\", kwargs={\"id\": 1}),\n reverse(\"survey-completed\", kwargs={\"id\": 1}),\n reverse(\"survey-detail-step\", kwargs={\"id\": 1, \"step\": 1}),\n ]\n for url in urls:\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.assertTrue(settings.LOGIN_URL in response[\"location\"])\n self.login()\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n self.logout()", "def 
test_get_security_log_with_cmd(self, mock_send_cli_cmd, mock_sleep):\n self.log.display_title(title=self.tool.get_current_function_name())\n self.log.step_num = 0\n msg = \"get in detail result is existing\"\n response = \"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_with_cmd(\n device=None,\n type=\"stream\",\n file_name=\"file\",\n stream_file_name=\"f1\",\n category=\"1\",\n source_address=\"192.168.100.103\",\n source_zone=\"source_zone1\",\n source_interface=\"1\",\n destination_address=\"192.168.200.103\",\n threat_severity=\"1\",\n count=\"1\",\n reason=\"1\",\n service=\"1\",\n url=\"1\",\n role=\"1\",\n profile=\"1\",\n protocol=\"1\",\n policy_name=\"1\",\n rule_name=\"1\",\n nested_application=\"1\",\n operation=\"1\",\n application=\"1\",\n user=\"1\",\n source_name=\"1\",\n event_type=\"1\",\n start_from=\"1\",\n start_time=\"1\",\n stop_time=\"1\",\n check_content=\"cnrd-ngsrxqavm40\",\n option=\"logical-systems LSYS1\",\n exist=\"yes\",\n )\n self.assertTrue(isinstance(result, str))\n\n msg = \"get in detail result with no parameter\"\n response = \"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" 
application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n result = self.ins.get_security_log_with_cmd(\n device=None,\n check_content=\"cnrd-ngsrxqavm40\",\n exist=\"yes\",\n )\n self.assertTrue(isinstance(result, str))\n\n msg = \"get in detail result exist is none\"\n response = \"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_with_cmd(\n device=None,\n type=\"session-close\",\n check_content=\"cnrd-ngsrxqavm40\",\n )\n self.assertFalse(result)\n\n msg = \"get in detail result is none\"\n response = \"\"\"\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_with_cmd(\n device=None,\n type=\"session-close\",\n check_content=\"cnrd-ngsrxqavm40\",\n )\n self.assertFalse(result)\n\n msg = \"get in detail result exist is no with no check_content\"\n response = \"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" 
session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"\n mock_send_cli_cmd.return_value = self.xml.xml_string_to_dict(xml_str=response)\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_with_cmd(\n device=None,\n type=\"session-close\",\n check_content=\"1234567\",\n exist=\"no\"\n )\n self.assertTrue(result)\n\n\n msg = \"get in detail result check_content is wrong\"\n mock_send_cli_cmd.side_effect = (\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n )\n result = self.ins.get_security_log_with_cmd(\n device=None,\n check_content=\"cnrd-ngsrxqavm40\",\n exist=\"yes\",\n )\n self.assertTrue(isinstance(result, str))\n\n\n\n msg = \"get in detail result check_content is wrong\"\n mock_send_cli_cmd.side_effect = (\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" 
nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n 
self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n )\n result = self.ins.get_security_log_with_cmd(\n device=None,\n check_content=\"1234567\",\n exist=\"yes\",\n )\n self.assertFalse(result)\n\n\n msg = \"get in detail result check_content is wrong\"\n mock_send_cli_cmd.side_effect = (\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat 
rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n )\n result = self.ins.get_security_log_with_cmd(\n device=None,\n check_content=\"cnrd-ngsrxqavm40\",\n exist=\"no\",\n )\n self.assertFalse(result)\n\n\n msg = \"get in detail 
result exist is no\"\n mock_send_cli_cmd.side_effect = (\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n &lt;14&gt;1 2017-02-22T14:15:35 cnrd-ngsrxqavm40 RT_LOG_SELF_TEST - RT_FLOW_SESSION_CLOSE [[email protected] reason=\"Some reason\" source-address=\"192.168.100.103\" source-port=\"8003\" destination-address=\"192.168.200.103\" destination-port=\"32768\" connection-tag=\"0\" service-name=\"Medium\" nat-source-address=\"192.168.100.103\" nat-source-port=\"8003\" nat-destination-address=\"192.168.200.103\" nat-destination-port=\"32768\" nat-connection-tag=\"0\" src-nat-rule-type=\"Fake src nat rule\" src-nat-rule-name=\"Fake src nat rule\" dst-nat-rule-type=\"Fake dst nat rule\" dst-nat-rule-name=\"Fake dst nat rule\" protocol-id=\"17\" policy-name=\"session_policy4\" source-zone-name=\"source_zone4\" 
destination-zone-name=\"Fake dst zone\" session-id-32=\"4\" packets-from-client=\"4294967295\" bytes-from-client=\"1073741824\" packets-from-server=\"4294967294\" bytes-from-server=\"1073741824\" elapsed-time=\"4294967291\" application=\"application4\" nested-application=\"nested_application4\" username=\"user4\" roles=\"Fake UAC roles\" packet-incoming-interface=\"source_interface4\" encrypted=\"Fake info telling if the traffic is encrypted\"]\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n self.xml.xml_string_to_dict(\"\"\"\n <security-logging-information>\n <show-hpl-infile>\n <entry>\n </entry>\n </show-hpl-infile>\n </security-logging-information>\n \"\"\"),\n )\n\n self.log.display(level=\"INFO\", msg=\"get security log report in detail dict value\")\n result = self.ins.get_security_log_with_cmd(\n device=None,\n type=\"session-close\",\n source_address=\"192.168.100.103\",\n check_content=\"cnrd-ngsrxqavm40\",\n exist=\"no\",\n )\n self.assertTrue(result)", "def test_login_field(self):\n field = self.record.find('field[@name=\\'login\\']')\n self.assertEqual(field.text, 'adt', 'Incorrect login Field')", "def test_login(self):\n\n print('\\n\\nEnter a valid LendingClub account information...')\n email = input('Email:')\n password = getpass.getpass()\n\n self.assertTrue(self.session.authenticate(email, password))\n print('Authentication successful')", "def testsecurity(self,id=0):\n return 'failed test security'", "def test_verbose_debug():\n output = subprocess.run(['smif', 'list', '-vv'], stderr=subprocess.PIPE)\n assert 'DEBUG' in str(output.stderr)", "def test_login_records_attributes(self):\n self.test_login()\n record = UserSocialAuth.objects.get(\n user=self.user, provider=self.PROVIDER_BACKEND, uid__startswith=self.PROVIDER_IDP_SLUG\n )\n attributes = record.extra_data\n assert attributes.get('urn:oid:1.3.6.1.4.1.5923.1.1.1.9') == ['[email protected]', '[email protected]']\n assert attributes.get('urn:oid:2.5.4.3') == ['Me Myself And I']\n assert attributes.get('urn:oid:0.9.2342.19200300.100.1.1') == ['myself']\n assert attributes.get('urn:oid:2.5.4.20') == ['555-5555']\n # Phone number", "def test_login_required(self):\n res = self.client.get(TAG_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_fetch_log_valid():\n ident = _id()\n proj.fetch('test', ident)\n log = proj.fetch_log('test', ident)\n assert 'this should go into run.log' in log", "def test_login_url(self):\r\n res = self.testapp.get('/login', status=200)\r\n\r\n body_str = u\"Log In\"\r\n form_str = u'name=\"login\"'\r\n\r\n self.assertTrue(\r\n body_str in res.body,\r\n msg=\"Request should contain Log In: \" + res.body)\r\n\r\n # There should be a login form on there.\r\n self.assertTrue(\r\n form_str in res.body,\r\n msg=\"The login input should be visible in the body:\" + res.body)", "def test_func(debug: bool) -> None:\n click.echo(debug)", "def test_login (self):\n cli = Client ( )\n view_url = reverse ('accounts.views.login')\n resp = cli.get (view_url)\n self.assertTrue (resp.status_code == 200,\n \"The view %s returned %s\" % (view_url,\n resp.status_code))", "async def test_validate_login(hass: HomeAssistant, provider, capsys) -> None:\n data = provider.data\n data.add_auth(\"test-user\", \"test-pass\")\n\n await script_auth.validate_login(\n hass, provider, Mock(username=\"test-user\", password=\"test-pass\")\n )\n captured = capsys.readouterr()\n assert captured.out == \"Auth valid\\n\"\n\n await script_auth.validate_login(\n hass, 
provider, Mock(username=\"test-user\", password=\"invalid-pass\")\n )\n captured = capsys.readouterr()\n assert captured.out == \"Auth invalid\\n\"\n\n await script_auth.validate_login(\n hass, provider, Mock(username=\"invalid-user\", password=\"test-pass\")\n )\n captured = capsys.readouterr()\n assert captured.out == \"Auth invalid\\n\"", "def test_login_required(self):\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code,status.HTTP_401_UNAUTHORIZED)", "def test_logging():\n assert logger.name == 'wellcomeml.logger'", "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return", "def test_login(self):\n response = self.client.post(\"/login\", json=self.payload)\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(response.json.get(\"access_token\"))\n self.assertIsNotNone(response.json.get(\"refresh_token\"))", "def test_directOnLoginPage(self):\r\n print('========================================================================')\r\n print('Test for check redirect on Login page after link Login click')\r\n #Load Registrtion page\r\n self.reg_page.open_registration_page()\r\n driver = self.reg_page.driver\r\n\r\n #cheks if right title\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n self.reg_page.click_login_lnk()\r\n\r\n #driver.get(self.login_page.LOGIN_URL)\r\n #wait = WebDriverWait(driver, 20)\r\n #element = wait.until(EC.title_is(self.reg_page.get_login_title()))\r\n\r\n time.sleep(2)\r\n assert self.reg_page.is_login_title_matches(), \"Login title page doesn't match\"\r\n print('--------- SUCCESS test_directOnLoginPage-----------')\r\n driver.quit()", "def sso_saml_login(request, idp_slug):\n login_url = request.saml2_auth.login()\n username = get_sso_username_from_session(request)\n if username:\n # verify that the stored user data actually the current IdP\n idp = IdentityProvider.get_active_identity_provider_by_username(username)\n if idp and idp.slug == idp_slug:\n # pre-populate username for Azure AD\n login_url = f'{login_url}&login_hint={username}'\n return HttpResponseRedirect(login_url)", "def assertion_callback(_request, _uri, headers):\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')", "def test_login(self):\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {}\n action = 'Login'\n set_email = EMAIL\n set_admin = False\n continue_url = ''\n\n expected_set = login._set_user_info_cookie(set_email, set_admin).strip()\n\n # No continue URL.\n status, location, set_cookie, _ = self._run_test(\n host, path_info, cookie_dict, action, set_email, set_admin,\n continue_url)\n self.assertEqual(302, status)\n self.assertEqual('http://%s%s' % (host, path_info), location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n\n # Continue URL.\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, _ = self._run_test(\n host, path_info, 
cookie_dict, action, set_email, set_admin,\n continue_url)\n self.assertEqual(302, status)\n self.assertEqual(continue_url, location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)", "def test_root_logger_config(self):\n with debug_env:\n logging.config.dictConfig(django12factor.factorise()[\"LOGGING\"])\n self.assertTrue(has_handler(logging.root, \"stdout\"))", "def ttest_login_logout(self):\n rv = self.login(\n app.config['USERNAME'],\n app.config['PASSWORD']\n )\n assert b'You were logged in' in rv.data\n rv = self.logout()\n assert b'You were logged out' in rv.data\n rv = self.login(\n app.config['USERNAME'] + 'x',\n app.config['PASSWORD']\n )\n assert b'Invalid username' in rv.data\n rv = self.login(\n app.config['USERNAME'],\n app.config['PASSWORD'] + 'x'\n )\n assert b'Invalid password' in rv.data", "def debug() -> bool:", "def test_verbose_debug_alt():\n output = subprocess.run(['smif', 'list', '--verbose', '--verbose'], stderr=subprocess.PIPE)\n assert 'DEBUG' in str(output.stderr)", "def test_enabled_enabled_in_logs():\n stmt = sqlalchemy.select([sqlalchemy.func.count()]).select_from(_LOGGING_TABLE)\n config_info = read_config()\n config_info['enabled'] = True\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n\n # time.sleep(convert_sleep(config_info['wait']))\n data_before_wait = execute_command_with_return_value(stmt)\n time.sleep(convert_sleep(config_info['wait'])*2)\n data_after_wait = execute_command_with_return_value(stmt)\n\n assert int(data_before_wait[0][0]) < int(data_after_wait[0][0])", "def test_login_GET(self):\r\n\r\n with self.client:\r\n response = self.client.get('/login')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(\r\n b'Please enter your credentials to continue.', response.data)", "def test_setup_logging_verbose(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n setup_logging(LogLevels.VERBOSE)\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertFalse(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertFalse(self.boto3_logger.isEnabledFor(LogLevels.INFO))\n self.assertFalse(self.botocore_logger.isEnabledFor(LogLevels.INFO))", "def log_successful_login(sender, request, user, **kwargs):\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.info(u\"Login success - user.id: {0}\".format(user.id))\r\n else:\r\n AUDIT_LOG.info(u\"Login success - {0} ({1})\".format(user.username, user.email))", "def test_login_view(self):\n url = reverse('xds_api:login')\n # create user, save user, login using client\n XDSUser.objects.create_user(self.email,\n self.password,\n first_name=self.first_name,\n last_name=self.last_name)\n\n response = self.client.post(url, self.userDict_login)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n 
self.assertTrue(responseDict['token'] is not None)\n self.assertTrue(responseDict['user'] is not None)" ]
[ "0.6547012", "0.61618876", "0.6142526", "0.6016165", "0.5957221", "0.59324735", "0.5907222", "0.58748484", "0.58701736", "0.5860925", "0.582712", "0.5819717", "0.57623357", "0.5748084", "0.57179964", "0.56889266", "0.56684595", "0.56650877", "0.5635419", "0.5612174", "0.55851114", "0.5562649", "0.5553079", "0.5520293", "0.5515169", "0.55113184", "0.5478973", "0.54718214", "0.5471259", "0.5464586", "0.54638547", "0.5442302", "0.54375803", "0.5434043", "0.5420376", "0.5415847", "0.5410498", "0.5408319", "0.54057336", "0.54009014", "0.53988916", "0.5383864", "0.5364239", "0.53635806", "0.53577167", "0.53478813", "0.53475755", "0.53427273", "0.5340477", "0.533927", "0.53383267", "0.5330945", "0.53248394", "0.53196514", "0.53146136", "0.5311347", "0.5305563", "0.5303096", "0.5298549", "0.52941877", "0.52938175", "0.5293296", "0.5288616", "0.52852166", "0.5281907", "0.5275286", "0.5271601", "0.5269318", "0.52573884", "0.52539843", "0.5245905", "0.52454084", "0.52341586", "0.52307576", "0.5227804", "0.5221505", "0.5215907", "0.52155685", "0.5214548", "0.5214245", "0.52039975", "0.5194448", "0.51925224", "0.5175907", "0.5174047", "0.5172051", "0.5170244", "0.5161848", "0.5161495", "0.51600486", "0.51594937", "0.51582277", "0.5155226", "0.51540124", "0.51527375", "0.51408136", "0.51402694", "0.5134646", "0.5130446", "0.5128094" ]
0.8257416
0
Enable and configure the TestShib SAML IdP as a third_party_auth provider
def test_configure_testshib_provider_with_cache_duration(self): kwargs = {} kwargs.setdefault('name', self.PROVIDER_NAME) kwargs.setdefault('enabled', True) kwargs.setdefault('visible', True) kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG) kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID) kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION) kwargs.setdefault('icon_class', 'fa-university') kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName self.configure_saml_provider(**kwargs) assert httpretty.is_enabled() num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata() assert num_total == 1 assert num_skipped == 0 assert num_attempted == 1 assert num_updated == 1 assert num_failed == 0 assert len(failure_messages) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)", "def init_saml_auth(saml_prepared_flask_request):\n return OneLogin_Saml2_Auth(saml_prepared_flask_request, custom_base_path=app.config.get('SAML_PATH', None))", "def init_saml_auth(req):\n auth = OneLogin_Saml2_Auth(req, custom_base_path=app.config[\"SAML_PATH\"])\n return auth", "def __init__(self, base_url):\n module_base = \"%s/%s\" % (base_url, Saml2BackendPlugin.provider)\n sp_config = {\n \"entityid\": \"%s/proxy_sp.xml\" % module_base,\n \"service\": {\n \"sp\": {\n \"allow_unsolicited\": True,\n \"endpoints\": {\n \"assertion_consumer_service\": [\n (\"%s/acs/post\" % module_base, BINDING_HTTP_POST),\n (\"%s/acs/redirect\" % module_base, BINDING_HTTP_REDIRECT)\n ],\n }\n }\n },\n \"key_file\": TestConfiguration.get_instance().backend_key.name,\n \"cert_file\": TestConfiguration.get_instance().backend_cert.name,\n \"metadata\": {\n \"local\": TestConfiguration.get_instance().fake_idp_metadata,\n },\n\n \"xmlsec_binary\": TestConfiguration.get_instance().xmlsec_path,\n }\n config = {\"config\": sp_config,\n \"idp_entity_id\": \"https://example.com/unittest_idp.xml\",\n \"state_id\": \"saml_backend_test_id\"\n }\n\n super(Saml2BackendPlugin, self).__init__(SamlBackend, Saml2BackendPlugin.provider, config)", "def add_tomcat7_idp():\n pass", "def setup_provider(self):\n pass", "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def get_saml_auth(flask_request):\n return OneLogin_Saml2_Auth(prepare_flask_request_for_saml(flask_request), custom_base_path=app.config.get('SAML_PATH', None))", "def __init__(self, base_url):\n idpconfig = {\n \"entityid\": \"{}/proxy.xml\".format(base_url),\n \"service\": {\n \"idp\": {\n \"endpoints\": {\n \"single_sign_on_service\": [(\"%s/%s/sso/redirect\" %\n (base_url, Saml2BackendPlugin.provider),\n 
BINDING_HTTP_REDIRECT),\n (\"%s/%s/sso/post\" %\n (base_url, Saml2BackendPlugin.provider),\n BINDING_HTTP_POST)]\n },\n },\n },\n \"key_file\": TestConfiguration.get_instance().frontend_key.name,\n \"cert_file\": TestConfiguration.get_instance().frontend_cert.name,\n \"metadata\": {\n \"local\": TestConfiguration.get_instance().fake_sp_metadata,\n },\n \"xmlsec_binary\": TestConfiguration.get_instance().xmlsec_path,\n }\n\n config = {\"idp_config\": idpconfig,\n \"endpoints\": Saml2FrontendPlugin.endpoints,\n \"base\": base_url,\n \"state_id\": \"saml_frontend_state_id\"}\n\n super(Saml2FrontendPlugin, self).__init__(SamlFrontend, \"Saml2IDP\", config)", "def setup(cls, transport_config):\n cls.we_are_initiator = transport_config.weAreClient\n\n # Check for shared-secret in the server transport options.\n transport_options = transport_config.getServerTransportOptions()\n if transport_options and \"shared-secret\" in transport_options:\n log.debug(\"Setting shared-secret from server transport options: '%s'\", transport_options[\"shared-secret\"])\n cls.shared_secret = transport_options[\"shared-secret\"]", "def enable_sso(DirectoryId=None, UserName=None, Password=None):\n pass", "def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"", "def test_externalauth_login_required_course_context(self):\r\n TARGET_URL = reverse('courseware', args=[self.course.id.to_deprecated_string()]) # pylint: disable=C0103\r\n noshib_response = self.client.get(TARGET_URL, follow=True)\r\n self.assertEqual(noshib_response.redirect_chain[-1],\r\n ('http://testserver/accounts/login?next={url}'.format(url=TARGET_URL), 302))\r\n self.assertContains(noshib_response, (\"Log into your {platform_name} Account | {platform_name}\"\r\n .format(platform_name=settings.PLATFORM_NAME)))\r\n self.assertEqual(noshib_response.status_code, 200)\r\n\r\n TARGET_URL_SHIB = reverse('courseware', args=[self.shib_course.id.to_deprecated_string()]) # pylint: disable=C0103\r\n shib_response = self.client.get(**{'path': TARGET_URL_SHIB,\r\n 'follow': True,\r\n 'REMOTE_USER': self.extauth.external_id,\r\n 'Shib-Identity-Provider': 'https://idp.stanford.edu/'})\r\n # Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain\r\n # The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we\r\n # won't test its contents\r\n self.assertEqual(shib_response.redirect_chain[-3],\r\n ('http://testserver/shib-login/?next={url}'.format(url=TARGET_URL_SHIB), 302))\r\n self.assertEqual(shib_response.redirect_chain[-2],\r\n ('http://testserver{url}'.format(url=TARGET_URL_SHIB), 302))\r\n self.assertEqual(shib_response.status_code, 200)", "def includeme(config):\n # authentication\n auth_secret = os.environ.get('AUTH_SECRET', '')\n auth_policy = AuthTktAuthenticationPolicy(\n secret=auth_secret,\n hashalg='sha512'\n )\n 
config.set_authentication_policy(auth_policy)\n # authorization\n authz_policy = ACLAuthorizationPolicy()\n config.set_authorization_policy(authz_policy)\n config.set_root_factory(MyRoot)\n\n session_secret = os.environ.get('SESSION_SECRET', '')\n session_factory = SignedCookieSessionFactory(session_secret)\n config.set_session_factory(session_factory)\n config.set_default_csrf_options(require_csrf=True)", "def includeme(config):\n # Grab the pyramid-wide settings, to look for any auth config.\n settings = config.get_settings().copy()\n # Use the settings to construct an AuthenticationPolicy.\n authn_policy = SRPAuthenticationPolicy.from_settings(settings)\n config.set_authentication_policy(authn_policy)\n # Hook up a default AuthorizationPolicy.\n # You can't have one without the other, and ACLAuthorizationPolicy is\n # usually what you want. If the app configures one explicitly then this\n # will get overridden.\n authz_policy = ACLAuthorizationPolicy()\n config.set_authorization_policy(authz_policy)\n # Add forbidden view to challenge for auth credentials.\n config.add_view(authn_policy.challenge_view,\n context=\"pyramid.exceptions.Forbidden\")", "def samladsv3(self):\n try:\n # Get the federated credentials from the user\n print(\"[-] Get authentication token\")\n print(\"Email:\", end=' ')\n username = input()\n password = getpass.getpass()\n print('')\n\n # Initiate session handler\n session = requests.Session()\n\n # Programmatically get the SAML assertion\n # Opens the initial IdP url and follows all of the HTTP302 redirects, and\n # gets the resulting login page\n formresponse = session.get(idpentryurl, verify=sslverification)\n # Capture the idpauthformsubmiturl, which is the final url after all the 302s\n idpauthformsubmiturl = formresponse.url\n\n # Parse the response and extract all the necessary values\n # in order to build a dictionary of all of the form values the IdP expects\n formsoup = BeautifulSoup(formresponse.text, \"html.parser\")\n payload = {}\n\n for inputtag in formsoup.find_all(re.compile('(INPUT|input)')):\n name = inputtag.get('name','')\n value = inputtag.get('value','')\n if \"user\" in name.lower():\n #Make an educated guess that this is the right field for the username\n payload[name] = username\n elif \"email\" in name.lower():\n #Some IdPs also label the username field as 'email'\n payload[name] = username\n elif \"pass\" in name.lower():\n #Make an educated guess that this is the right field for the password\n payload[name] = password\n else:\n #Simply populate the parameter with the existing value (picks up hidden fields in the login form)\n payload[name] = value\n\n # Debug the parameter payload if needed\n # Use with caution since this will print sensitive output to the screen\n #print(payload)\n\n # Some IdPs don't explicitly set a form action, but if one is set we should\n # build the idpauthformsubmiturl by combining the scheme and hostname\n # from the entry url with the form action target\n # If the action tag doesn't exist, we just stick with the\n # idpauthformsubmiturl above\n for inputtag in formsoup.find_all(re.compile('(FORM|form)')):\n action = inputtag.get('action')\n loginid = inputtag.get('id')\n if (action and loginid == \"loginForm\"):\n parsedurl = urlparse(idpentryurl)\n idpauthformsubmiturl = parsedurl.scheme + \"://\" + parsedurl.netloc + action\n\n # Performs the submission of the IdP login form with the above post data\n response = session.post(\n idpauthformsubmiturl, data=payload, verify=sslverification)\n\n # Debug the 
response if needed\n #print(response.text)\n\n # Overwrite and delete the credential variables, just for safety\n username = '##############################################'\n password = '##############################################'\n del username\n del password\n\n # Decode the response and extract the SAML assertion\n soup = BeautifulSoup(response.text, \"html.parser\")\n assertion = ''\n\n # Look for the SAMLResponse attribute of the input tag (determined by\n # analyzing the debug print lines above)\n for inputtag in soup.find_all('input'):\n if(inputtag.get('name') == 'SAMLResponse'):\n #print(inputtag.get('value'))\n assertion = inputtag.get('value')\n\n # Better error handling is required for production use.\n if (assertion == ''):\n #TODO: Insert valid error checking/handling\n print('Response did not contain a valid SAML assertion')\n sys.exit(0)\n\n # Debug only\n #print(base64.b64decode(assertion))\n\n # Parse the returned assertion and extract the authorized roles\n awsroles = []\n root = ET.fromstring(base64.b64decode(assertion))\n for saml2attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):\n if (saml2attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role'):\n for saml2attributevalue in saml2attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):\n awsroles.append(saml2attributevalue.text)\n\n # Note the format of the attribute value should be role_arn,principal_arn\n # but lots of blogs list it as principal_arn,role_arn so let's reverse\n # them if needed\n for awsrole in awsroles:\n chunks = awsrole.split(',')\n if'saml-provider' in chunks[0]:\n newawsrole = chunks[1] + ',' + chunks[0]\n index = awsroles.index(awsrole)\n awsroles.insert(index, newawsrole)\n awsroles.remove(awsrole)\n\n # If I have more than one role, ask the user which one they want,\n # otherwise just proceed\n print(\"\")\n if len(awsroles) > 1:\n i = 0\n print(\"Please choose the role you would like to assume:\")\n for awsrole in awsroles:\n print('[', i, ']: ', awsrole.split(',')[0])\n i += 1\n print(\"Selection: \", end=' ')\n selectedroleindex = input()\n\n # Basic sanity check of input\n if int(selectedroleindex) > (len(awsroles) - 1):\n print('You selected an invalid role index, please try again')\n sys.exit(0)\n\n role_arn = awsroles[int(selectedroleindex)].split(',')[0]\n principal_arn = awsroles[int(selectedroleindex)].split(',')[1]\n else:\n role_arn = awsroles[0].split(',')[0]\n principal_arn = awsroles[0].split(',')[1]\n\n # Use the assertion to get an AWS STS token using Assume Role with SAML\n conn = boto3.client('sts', region_name=region)\n token = conn.assume_role_with_saml(RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=assertion)\n\n # Read in the existing config file\n config = configparser.RawConfigParser()\n config.read(credentials)\n\n # Put the credentials into a saml specific section instead of clobbering\n # the default credentials\n if not config.has_section('saml'):\n config.add_section('saml')\n\n config['saml']['output'] = outputformat\n config['saml']['region'] = region\n config['saml']['aws_access_key_id'] = token['Credentials']['AccessKeyId']\n config['saml']['aws_secret_access_key'] = token['Credentials']['SecretAccessKey']\n config['saml']['aws_session_token'] = token['Credentials']['SessionToken']\n\n # Write the updated config file\n with open(credentials, 'w+') as configfile:\n config.write(configfile)\n\n # Give the user some basic info as to what has just happened\n 
print('\\n\\n----------------------------------------------------------------')\n print('Your new access key pair has been stored in the AWS configuration file {0} under the saml profile.'.format(credentials))\n print('Note that it will expire at {0}.'.format(token['Credentials']['Expiration'].astimezone(get_localzone())))\n print('After this time, you may safely rerun this script to refresh your access key pair.')\n print('To use this credential, call the AWS CLI with the --profile option (e.g. aws --profile saml ec2 describe-instances).')\n print('----------------------------------------------------------------\\n\\n')\n\n return samladsv3\n\n except Exception as e:\n print(\"Error while getting authentication token. %s\" % e)", "def setup_auth_turing(cluster):\n # Read in auth info\n azure_file = os.path.join(ABSOLUTE_HERE, \"secrets\", \"turing-auth-key-prod.json\")\n with open(azure_file, \"r\") as stream:\n azure = json.load(stream)\n\n # Login in to Azure\n login_cmd = [\n \"az\", \"login\", \"--service-principal\",\n \"--username\", azure[\"sp-app-id\"],\n \"--password\", azure[\"sp-app-key\"],\n \"--tenant\", azure[\"tenant-id\"]\n ]\n subprocess.check_output(login_cmd)\n\n # Set kubeconfig\n creds_cmd = [\n \"az\", \"aks\", \"get-credentials\",\n \"--name\", cluster,\n \"--resource-group\", \"binder-prod\"\n\n ]\n stdout = subprocess.check_output(creds_cmd)\n print(stdout.decode('utf-8'))", "def test_open_id_setup(self):\r\n self.attempt_login(200)", "def request_app_setup(hass, config, add_devices, discovery_info=None):\n from requests.compat import urljoin\n from requests_oauthlib import OAuth2Session\n configurator = hass.components.configurator\n authorization_base_url = urljoin(BASE_URL, '/oauth/authorize')\n oauth = OAuth2Session(config[CONF_CLIENT_ID], redirect_uri=REDIRECT_URI, state=None)\n\n def trakt_configuration_callback(data):\n \"\"\"Run when the configuration callback is called.\"\"\"\n token_url = urljoin(BASE_URL, '/oauth/token')\n oauth.fetch_token(token_url, client_secret=config[CONF_CLIENT_SECRET], code=data.get('pin_code'))\n token = oauth.token['access_token']\n save_token(hass, token)\n continue_setup_platform(hass, config, token, add_devices, discovery_info)\n\n if 'trakt' not in _CONFIGURING:\n authorization_url, _ = oauth.authorization_url(authorization_base_url, username=config[CONF_USERNAME])\n\n _CONFIGURING['trakt'] = configurator.request_config(\n 'Trakt',\n trakt_configuration_callback,\n description=\"Enter pin code from Trakt: \" + authorization_url,\n submit_caption='Verify',\n fields=[{\n 'id': 'pin_code',\n 'name': \"Pin code\",\n 'type': 'string'}]\n )", "def test_auth0_config_anon(anontestapp, registry):\n _test_auth_config(anontestapp, registry)", "def setUp(self):\n super().setUp()\n\n # Mock the call to the SAP SuccessFactors assertion endpoint\n SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp'\n\n def assertion_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')\n\n httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)\n\n SAPSF_BAD_ASSERTION_URL = 
'http://successfactors.com/oauth-fake/idp'\n\n def bad_callback(_request, _uri, headers):\n \"\"\"\n Return a 404 error when someone tries to call the URL.\n \"\"\"\n return (404, headers, 'NOT AN ASSERTION')\n\n httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback)\n\n # Mock the call to the SAP SuccessFactors token endpoint\n SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'\n\n def token_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'assertion=fake_saml_assertion' in _request.body\n assert b'company_id=NCC1701D' in _request.body\n assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, '{\"access_token\": \"faketoken\"}')\n\n httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)\n\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'email': '[email protected]',\n 'country': 'Australia',\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)", "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def provider(hass):\n provider = hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )\n hass.loop.run_until_complete(provider.async_initialize())\n return provider", "def on_identity_loaded(sender, identity):\n key = current_app.config.get(\n \"OAUTHCLIENT_CERN_OPENID_SESSION_KEY\",\n OAUTHCLIENT_CERN_OPENID_SESSION_KEY,\n )\n identity.provides.update(session.get(key, []))", "def post_setup(cls):\n super().post_setup()\n\n # The SENTRY_DSN setting should be available to activate sentry for an environment\n if cls.SENTRY_DSN is not None:\n sentry_sdk.init( # pylint: disable=abstract-class-instantiated\n dsn=cls.SENTRY_DSN,\n environment=cls._get_environment(),\n release=get_release(),\n integrations=[DjangoIntegration()],\n )\n with sentry_sdk.configure_scope() as scope:\n scope.set_extra(\"application\", \"backend\")", "def test_client_key_secret(self):\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['lti_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n self.xmodule.lti_id = \"lti_id\"\r\n key, secret = self.xmodule.get_client_key_secret()\r\n expected = ('test_client', 'test_secret')\r\n self.assertEqual(expected, (key, secret))", "def test_shib_login_enrollment(self):\r\n student = UserFactory.create()\r\n extauth = ExternalAuthMap(external_id='[email protected]',\r\n external_email='',\r\n external_domain='shib:https://idp.stanford.edu/',\r\n external_credentials=\"\",\r\n internal_password=\"password\",\r\n user=student)\r\n student.set_password(\"password\")\r\n 
student.save()\r\n extauth.save()\r\n\r\n course = CourseFactory.create(org='Stanford', number='123', display_name='Shib Only')\r\n course.enrollment_domain = 'shib:https://idp.stanford.edu/'\r\n self.store.update_item(course, '**replace_user**')\r\n\r\n # use django test client for sessions and url processing\r\n # no enrollment before trying\r\n self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))\r\n self.client.logout()\r\n request_kwargs = {'path': '/shib-login/',\r\n 'data': {'enrollment_action': 'enroll', 'course_id': course.id.to_deprecated_string(), 'next': '/testredirect'},\r\n 'follow': False,\r\n 'REMOTE_USER': '[email protected]',\r\n 'Shib-Identity-Provider': 'https://idp.stanford.edu/'}\r\n response = self.client.get(**request_kwargs)\r\n # successful login is a redirect to \"/\"\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['location'], 'http://testserver/testredirect')\r\n # now there is enrollment\r\n self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))\r\n\r\n # Clean up and try again with POST (doesn't happen with real production shib, doing this for test coverage)\r\n self.client.logout()\r\n CourseEnrollment.unenroll(student, course.id)\r\n self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))\r\n\r\n response = self.client.post(**request_kwargs)\r\n # successful login is a redirect to \"/\"\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['location'], 'http://testserver/testredirect')\r\n # now there is enrollment\r\n self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))", "def setup_course_with_proctoring_backend(self, proctoring_provider, escalation_email):\n course = CourseFactory.create(enable_proctored_exams=True,\n enable_timed_exams=True,\n proctoring_provider=proctoring_provider,\n proctoring_escalation_email=escalation_email)\n self.setup_course_url(course)", "def test_client_key_secret(self):\n #this adds lti passports to system\n mocked_course = Mock(lti_passports=['lti_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n self.xmodule.lti_id = \"lti_id\"\n key, secret = self.xmodule.get_client_key_secret()\n expected = ('test_client', 'test_secret')\n assert expected == (key, secret)", "def svn_client_get_ssl_server_trust_prompt_provider(svn_auth_provider_object_t_provider, svn_auth_ssl_server_trust_prompt_func_t_prompt_func, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def set_auth_credentials():\n import os\n from passlib.apps import custom_app_context as pwd_context\n\n os.environ[\"AUTH_USERNAME\"] = \"testme\"\n os.environ[\"AUTH_PASSWORD\"] = pwd_context.hash(\"foobar\")", "def init_auth_client(self):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n token = util.prompt_for_user_token(\n cfg['username'],\n scope=cfg['scope'],\n client_id=cfg['spotipy_client_id'],\n client_secret=cfg['spotipy_client_secret'],\n redirect_uri=cfg['spotipy_redirect_uri'])\n sp = spotipy.Spotify(auth=token)\n return sp, cfg['username']", "def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of 
details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def sso_saml_acs(request, idp_slug):\n # todo these are placeholders for the json dump below\n error_reason = None\n request_session_data = None\n saml_relay = None\n\n request_id = request.session.get('AuthNRequestID')\n processed_response = request.saml2_auth.process_response(request_id=request_id)\n errors = request.saml2_auth.get_errors()\n not_auth_warn = not request.saml2_auth.is_authenticated()\n\n if not errors:\n if 'AuthNRequestID' in request.session:\n del request.session['AuthNRequestID']\n\n store_saml_data_in_session(request)\n\n user = auth.authenticate(\n request=request,\n username=request.session['samlNameId'],\n idp_slug=idp_slug,\n is_handshake_successful=True,\n )\n\n # we add the messages to the django messages framework here since\n # that middleware was not available for SsoBackend\n if hasattr(request, 'sso_new_user_messages'):\n for success_message in request.sso_new_user_messages['success']:\n messages.success(request, success_message)\n for error_message in request.sso_new_user_messages['error']:\n messages.error(request, error_message)\n\n if user:\n auth.login(request, user)\n\n # activate new project if needed\n project_name = get_new_sso_user_project_name_from_session(request)\n if project_name:\n try:\n request_new_domain(request, project_name, is_new_user=True)\n except NameUnavailableException:\n # this should never happen, but in the off chance it does\n # we don't want to throw a 500 on this view\n messages.error(\n request,\n _(\"We were unable to create your requested project \"\n \"because the name was already taken.\"\n \"Please contact support.\")\n )\n\n clear_sso_registration_data_from_session(request)\n return redirect(\"homepage\")\n\n # todo for debugging purposes to dump into the response below\n request_session_data = {\n \"samlUserdata\": request.session['samlUserdata'],\n \"samlNameId\": request.session['samlNameId'],\n \"samlNameIdFormat\": request.session['samlNameIdFormat'],\n \"samlNameIdNameQualifier\": request.session['samlNameIdNameQualifier'],\n \"samlNameIdSPNameQualifier\": request.session['samlNameIdSPNameQualifier'],\n \"samlSessionIndex\": request.session['samlSessionIndex'],\n }\n\n else:\n error_reason = request.saml2_auth.get_last_error_reason()\n\n return HttpResponse(json.dumps({\n \"errors\": errors,\n \"error_reason\": error_reason,\n \"not_auth_warn\": not_auth_warn,\n \"request_id\": request_id,\n \"processed_response\": processed_response,\n \"saml_relay\": saml_relay,\n \"request_session_data\": request_session_data,\n \"login_error\": getattr(request, 'sso_login_error', None),\n }), 'text/json')", "def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')", "def sso_saml_login(request, idp_slug):\n login_url = request.saml2_auth.login()\n username = get_sso_username_from_session(request)\n if username:\n # verify that the stored user data actually the current IdP\n idp = IdentityProvider.get_active_identity_provider_by_username(username)\n if idp and idp.slug == idp_slug:\n # pre-populate username for Azure AD\n login_url = f'{login_url}&login_hint={username}'\n return HttpResponseRedirect(login_url)", "def configure_irida_galaxy_connection(self, galaxy_url):\n 
self.configure_tool('IRIDA', 'client_secret', self.REDIRECT_CLIENT_SECRET)\n self.configure_tool('Galaxy', 'galaxy_url', galaxy_url)", "def test_client_key_secret_not_provided(self):\r\n\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['test_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n #set another lti_id\r\n self.xmodule.lti_id = \"another_lti_id\"\r\n key_secret = self.xmodule.get_client_key_secret()\r\n expected = ('','')\r\n self.assertEqual(expected, key_secret)", "def _get_saml_client(domain):\n acs_url = domain + '/sso/acs/'\n time_slack = 0\n mdata = tempfile.NamedTemporaryFile()\n f = open(mdata.name, 'wb')\n f.write(_urllib.urlopen(\n settings.SAML2_AUTH['METADATA_AUTO_CONF_URL']).read()\n )\n f.close()\n saml_settings = {\n 'metadata': {\n 'local': [mdata.name],\n },\n 'entityid': acs_url,\n 'service': {\n 'sp': {\n 'endpoints': {\n 'assertion_consumer_service': [\n (acs_url, BINDING_HTTP_REDIRECT),\n (acs_url, BINDING_HTTP_POST)\n ],\n },\n 'allow_unsolicited': True,\n 'authn_requests_signed': False,\n 'logout_requests_signed': True,\n 'want_assertions_signed': True,\n 'want_response_signed': False,\n },\n },\n 'accepted_time_diff': time_slack,\n }\n\n spConfig = Saml2Config()\n spConfig.load(saml_settings)\n spConfig.allow_unknown_attributes = True\n saml_client = Saml2Client(config=spConfig)\n mdata.close()\n return saml_client", "def _setup_threat_intel_auth_subparser(subparsers):\n generate_subparser(\n subparsers,\n 'update-auth',\n description='Enable, disable, or configure the threat intel downloader function',\n subcommand=True\n )", "def set_up_login():\n\n bitool.app.testing = True\n bitool.app.config['TESTING'] = True\n bitool.app.login_manager.init_app(bitool.app)\n app = bitool.app.test_client()\n\n return app", "def test_add_trusted_project(self):\n pass", "def svn_client_get_simple_provider(svn_auth_provider_object_t_provider, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def test_client_key_secret_not_provided(self):\n\n # this adds lti passports to system\n mocked_course = Mock(lti_passports=['test_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n # set another lti_id\n self.xmodule.lti_id = \"another_lti_id\"\n key_secret = self.xmodule.get_client_key_secret()\n expected = ('', '')\n assert expected == key_secret", "def setUp(self):\r\n super(CLITestAuthKeystoneWithId, self).setUp()\r\n self.client = client.HTTPClient(user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def set_credentials():", "def test_read_env_config3(config, environment_vars_set):\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"goood\"", "def test_add_trusted_project3(self):\n pass", "def configure_aaa_local_auth(device):\n try:\n device.configure([\n \"aaa authentication dot1x default local\",\n \"aaa local authentication default authorization default\",\n \"aaa authorization network default local\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure AAA local auth'\n )", "def update_identity_provider(module, sdk, cloud, idp):\n\n description = module.params.get('description')\n enabled = 
module.params.get('enabled')\n domain_id = module.params.get('domain_id')\n remote_ids = module.params.get('remote_ids')\n\n attributes = {}\n\n if (description is not None) and (description != idp.description):\n attributes['description'] = description\n if (enabled is not None) and (enabled != idp.is_enabled):\n attributes['enabled'] = enabled\n if (domain_id is not None) and (domain_id != idp.domain_id):\n attributes['domain_id'] = domain_id\n if (remote_ids is not None) and (remote_ids != idp.remote_ids):\n attributes['remote_ids'] = remote_ids\n\n if not attributes:\n return False, idp\n\n if module.check_mode:\n return True, None\n\n try:\n new_idp = cloud.identity.update_identity_provider(idp, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to update identity provider: {0}'.format(str(ex)))\n return (True, new_idp)", "def configure_ext_login(app):\n lm.init_app(app)\n\n @lm.user_loader\n def load_user(userid):\n \"\"\"\n Needed for flask-login.\n \"\"\"\n return models.User.query.get(int(userid))\n\n @app.before_request\n def set_g_user():\n g.user = current_user", "def get_sp_auth_url(self, session, sp_id, **kwargs):\n return None", "def test_add_trusted_project2(self):\n pass", "def configure_auth(self, auth_type, ha_type):\n yield self.configure_kerberos(auth_type, ha_type)\n self.configure_radius(auth_type)", "def test_add_trusted_project1(self):\n pass", "def register_auth_opts(conf, group, service_type=None):\n ks_loading.register_session_conf_options(conf, group)\n ks_loading.register_auth_conf_options(conf, group)\n CONF.set_default('auth_type', default='password', group=group)\n ks_loading.register_adapter_conf_options(conf, group)\n conf.set_default('valid_interfaces', DEFAULT_VALID_INTERFACES, group=group)\n if service_type:\n conf.set_default('service_type', service_type, group=group)\n else:\n types = os_service_types.get_service_types()\n key = 'ironic-inspector' if group == 'inspector' else group\n service_types = types.service_types_by_project.get(key)\n if service_types:\n conf.set_default('service_type', service_types[0], group=group)", "def test_add_trusted_project4(self):\n pass", "def svn_client_get_ssl_server_trust_file_provider(svn_auth_provider_object_t_provider, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def session_setup(opts: Dict[Any, Any]) -> Any: #TODO\n stype = ''\n if 'serverca' in opts and 'cert' in opts:\n stype = 'ssl'\n s = session.get(stype, **opts)\n if s is None:\n raise errors.KojiError('Unable to idenify authentication type.')\n s.login()\n if not s.is_ok():\n raise errors.AuthError('Unable to validate session')\n return s", "def configure_https():\n # need to write all to ensure changes to the entire request pipeline\n # propagate (c-api, haprxy, apache)\n CONFIGS.write_all()\n if 'https' in CONFIGS.complete_contexts():\n cmd = ['a2ensite', 'openstack_https_frontend']\n subprocess.check_call(cmd)\n else:\n cmd = ['a2dissite', 'openstack_https_frontend']\n subprocess.check_call(cmd)\n\n # TODO: improve this by checking if local CN certs are available\n # first then checking reload status (see LP #1433114).\n service_reload('apache2', restart_on_failure=True)\n\n for rid in relation_ids('identity-service'):\n identity_joined(rid=rid)", "def test_add_trusted_project7(self):\n pass", "def setUp(self):\r\n super(CLITestAuthKeystoneWithIdandName, self).setUp()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n 
tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def assert_first_party_auth_trumps_third_party_auth(self, email=None, password=None, success=None):\r\n _, strategy = self.get_request_and_strategy(\r\n auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')\r\n strategy.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))\r\n self.create_user_models_for_existing_account(\r\n strategy, email, password, self.get_username(), skip_social_auth=True)\r\n\r\n strategy.request.POST = dict(strategy.request.POST)\r\n\r\n if email:\r\n strategy.request.POST['email'] = email\r\n if password:\r\n strategy.request.POST['password'] = 'bad_' + password if success is False else password\r\n\r\n self.assert_pipeline_running(strategy.request)\r\n payload = json.loads(student_views.login_user(strategy.request).content)\r\n\r\n if success is None:\r\n # Request malformed -- just one of email/password given.\r\n self.assertFalse(payload.get('success'))\r\n self.assertIn('There was an error receiving your login information', payload.get('value'))\r\n elif success:\r\n # Request well-formed and credentials good.\r\n self.assertTrue(payload.get('success'))\r\n else:\r\n # Request well-formed but credentials bad.\r\n self.assertFalse(payload.get('success'))\r\n self.assertIn('incorrect', payload.get('value'))", "def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n self.endpoint_url = f\"https://westus.{mf.FACE_API_URL}\"", "def initiateAuthentication(identity_url, return_to=None):", "def test_proctortrack_provider_with_email(self):\n self.setup_course_with_proctoring_backend('proctortrack', '[email protected]')\n\n self.instructor.is_staff = True\n self.instructor.save()\n self._assert_escalation_email_available(True)", "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def config_setup(self, config):\n super(PushGatewayApiV1TestCase, self).config_setup(config)\n config[\"apps\"][\"com.example.spqr\"] = {\n \"type\": \"tests.test_pushgateway_api_v1.TestPushkin\"\n }", "def setup_class(self):\n self.endpoint = VERSION_PREFIX + '/auth/login'\n self.test_client = create_app().test_client()", "def __init__(self, requestor, client_id, client_secret, redirect_uri=None):\n super(TrustedAuthenticator, self).__init__(requestor, client_id,\n redirect_uri)\n self.client_secret = client_secret", "def install(self, provider):\n pass # pragma: no cover", "def test_auth_test(self):\n backend = LdapBackend()\n backend.authenticate(None, 
username=\"apple\", password=\"ffffff\")", "def __init__(__self__, *,\n authorization_strategy: pulumi.Input['FhirDatastoreIdentityProviderConfigurationAuthorizationStrategy'],\n fine_grained_authorization_enabled: Optional[pulumi.Input[bool]] = None,\n idp_lambda_arn: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"authorization_strategy\", authorization_strategy)\n if fine_grained_authorization_enabled is not None:\n pulumi.set(__self__, \"fine_grained_authorization_enabled\", fine_grained_authorization_enabled)\n if idp_lambda_arn is not None:\n pulumi.set(__self__, \"idp_lambda_arn\", idp_lambda_arn)\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)", "def init():\n ok = not g.unitTesting\n if ok:\n g.plugin_signon(__name__)\n return ok", "def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_add_trusted_project5(self):\n pass", "def test_register_http_failure(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def configure_aaa_session_id(device, type):\n try:\n device.configure([\n f\"aaa session-id {type}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure AAA session ID'\n )", "def setup_module():\n pytest.test_user = fake_user.FakeUser()", "def test_add_trusted_project6(self):\n pass", "def setUp(self):\n application.config['TESTING'] = True\n application.config['WTF_CSRF_ENABLED'] = False\n application.config['DEBUG'] = False\n self.app = application.test_client()\n # setup plaid client\n self.client = Client(\n ENV_VARS[\"PLAID_CLIENT_ID\"],\n ENV_VARS[\"PLAID_SECRET\"],\n ENV_VARS[\"PLAID_PUBLIC_KEY\"],\n \"sandbox\"\n )\n self.public_token = sandbox.PublicToken(self.client)\n db.drop_all()\n db.create_all()", "def sign_in(user_email, user_id):\n ourTestbed.setup_env(\n user_email=user_email,\n user_id=str(user_id),\n user_is_admin='0', # This was for GAE user admin, we use AppUser.\n overwrite=True)", "def __init__(__self__, *,\n auth_type: pulumi.Input[str],\n client_id: pulumi.Input[str],\n principal_id: pulumi.Input[str],\n secret: pulumi.Input[str]):\n pulumi.set(__self__, \"auth_type\", 'servicePrincipalSecret')\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"principal_id\", principal_id)\n pulumi.set(__self__, \"secret\", secret)", "def 
test_replace_o_auth_client(self):\n pass", "def init() -> None:\n # Setup elib_config\n elib_config.ELIBConfig.setup(\n app_version=__version__,\n app_name='ESST',\n config_file_path='esst.toml',\n config_sep_str='__',\n )\n\n # Write example config file\n elib_config.write_example_config('esst.toml.example')\n\n # Validate config\n try:\n elib_config.validate_config()\n except elib_config.ConfigMissingValueError as error:\n LOGGER.error('missing mandatory config value: %s', error.value_name)\n LOGGER.error('please read \"esst.toml.example\" for instructions on how to setup the configuration for ESST')\n sys.exit(1)\n\n for config in SentryConfigContext.__subclasses__():\n SENTRY.register_context(context_name=config.__name__, context_provider=config)", "def test_auth_client_instantiated():\n client = ConfigureClients()\n assert client.auth_client", "def setup(provider):\n try:\n logger.info(f\"Setting up {provider.__name__}_{os.getenv('step')}\")\n return getattr(provider(os.getenv('host')),\n f'setup_{os.getenv(\"step\", \"first\")}_step')()\n except Exception as err:\n logger.exception(f\"Could not configure {provider}: {err}\")\n raise", "def create_identity_provider(module, sdk, cloud, name):\n\n if module.check_mode:\n return True, None\n\n description = module.params.get('description')\n enabled = module.params.get('enabled')\n domain_id = module.params.get('domain_id')\n remote_ids = module.params.get('remote_ids')\n\n if enabled is None:\n enabled = True\n if remote_ids is None:\n remote_ids = []\n\n attributes = {\n 'domain_id': domain_id,\n 'enabled': enabled,\n 'remote_ids': remote_ids,\n }\n if description is not None:\n attributes['description'] = description\n\n try:\n idp = cloud.identity.create_identity_provider(id=name, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to create identity provider: {0}'.format(str(ex)))\n return (True, idp)", "def auth_active(hass):\n hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )", "def auth_token_provider_relaxed_ssl(self, auth_token_provider_relaxed_ssl):\n\n self._auth_token_provider_relaxed_ssl = auth_token_provider_relaxed_ssl", "def add_virtual_authenticator(self, config):\n pass", "def enable_aaa_authentication_login(device,auth_list,auth_db1,auth_db2=None):\n\n cmd = f'aaa authentication login {auth_list} {auth_db1}'\n if auth_db2:\n cmd += f' {auth_db2}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa authentication login:\\n{e}'\n )", "def setUp(self):\n self.new_credentials = Credentials(\"Facebook\",\"Josphato\",\"jose!!otieno@45\")", "def setUp(self):\n self.ol = OneloginAWS(\n _MockSection(\n base_uri=\"https://api.us.onelogin.com/\",\n client_id='mock-id',\n client_secret='mock-secret',\n aws_app_id='mock-app-id',\n subdomain='example',\n can_save_password=False,\n username='mock-username',\n duration_seconds=2600,\n auto_determine_ip_address=False,\n ),\n )\n\n self.ol.password = \"mock-password\"\n\n self.get_saml_assertion_mock = MagicMock(return_value=Namespace(\n mfa=Namespace(\n devices=[Namespace(type='mock1', id='mock-id-1'), ],\n state_token='mock-token'\n ),\n ))\n self.get_saml_assertion_verifying_mock = MagicMock(\n return_value='mock-saml-response'\n )\n self.ol.ol_client = Namespace(\n get_saml_assertion=self.get_saml_assertion_mock,\n get_saml_assertion_verifying=(\n self.get_saml_assertion_verifying_mock\n ),\n error=None,\n )", "def 
_load_providers(self, **kwargs):\n return super()._load_providers(providers=\"TIProviders\", **kwargs)", "def _enable(cls, provider):\r\n if provider.NAME in cls._ENABLED:\r\n raise ValueError('Provider %s already enabled' % provider.NAME)\r\n cls._ENABLED[provider.NAME] = provider", "def test_auth0_config_admin(testapp, registry):\n _test_auth_config(testapp, registry)", "def install_tomcat7_idp():\n print(sys._getframe().f_code.co_name)\n _install_packages([\n 'tomcat7', \n 'libmysql-java', \n 'libjstl1.1-java'\n ])\n \n # RENDER AND PUT idp.xml template into Catalina\n idpxml = _j2_env.get_template('tomcat7/idp.xml').render(\n idp_path = IDP_INSTALL_PATH,\n )\n _safe_put( \n StringIO(idpxml), \n TOMCAT_INST_PATH+'/Catalina/localhost/idp.xml'\n )\n \n _safe_put( \n TEMPLATE_DIR+'/tomcat7/server.xml', \n TOMCAT_INST_PATH+'/server.xml'\n )\n \n _safe_put( \n TEMPLATE_DIR+'/tomcat7/tomcat7', \n '/etc/default/tomcat7'\n )\n \n # installs addictional JARS\n _download_file( JSTL_DL_URL, TOMCAT_JAR_PATH, )\n \n commands = [\n #~ 'systemctl tomcat7 enable',\n 'update-rc.d tomcat7 enable',\n 'service tomcat7 restart'\n ]\n \n _run_safe_commands(commands)\n run('ln -sf /usr/share/java/mysql.jar /usr/share/tomcat7/lib/mysql.jar')", "def assertion_callback(_request, _uri, headers):\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')" ]
[ "0.70712364", "0.67885256", "0.6354537", "0.6346624", "0.61197853", "0.5923575", "0.5643076", "0.563825", "0.5628388", "0.55354863", "0.54767126", "0.5359936", "0.5318166", "0.5296389", "0.5250402", "0.5193824", "0.512464", "0.508345", "0.5081745", "0.50114036", "0.49877226", "0.49769607", "0.49637005", "0.49522707", "0.49423403", "0.49382955", "0.49262685", "0.49195066", "0.4895152", "0.48765677", "0.48518845", "0.485135", "0.48501897", "0.4842579", "0.48417944", "0.48414052", "0.48396638", "0.48279366", "0.481481", "0.48113233", "0.48085147", "0.47934958", "0.47832677", "0.47789362", "0.47661352", "0.4761046", "0.47472823", "0.4743033", "0.4731333", "0.47297505", "0.4726882", "0.4721062", "0.47187287", "0.47162288", "0.47127378", "0.47074145", "0.47050714", "0.47001922", "0.46987623", "0.46936902", "0.46895736", "0.46878624", "0.4682829", "0.467537", "0.46661362", "0.46502212", "0.46478626", "0.46452045", "0.4631082", "0.46266738", "0.46253008", "0.46181735", "0.46175417", "0.46167728", "0.46166554", "0.459547", "0.4594621", "0.4591472", "0.45852903", "0.45672095", "0.4566962", "0.456309", "0.45615938", "0.4560411", "0.45588133", "0.45577365", "0.45565063", "0.4555746", "0.4552438", "0.45522678", "0.45507446", "0.45484364", "0.45358902", "0.45287126", "0.4527064", "0.45017132", "0.4485704", "0.4483948", "0.4479489", "0.44785035" ]
0.58824545
6
Test that when we have a TPA provider which has an explicit maximum session length set, waiting for longer than that between requests results in us being logged out.
def test_login_with_testshib_provider_short_session_length(self):
    # Configure the provider with a 10-second timeout
    self._configure_testshib_provider(max_session_length=10)

    now = datetime.datetime.utcnow()
    with freeze_time(now):
        # Test the login flow, adding the user in the process
        self._test_login()

    # Wait 30 seconds; longer than the manually-set 10-second timeout
    later = now + datetime.timedelta(seconds=30)
    with freeze_time(later):
        # Test returning as a logged in user; this method verifies that we're logged out first.
        self._test_return_login(previous_session_timed_out=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inactive_session_timeout(self):\r\n email, password = self.STUDENT_INFO[0]\r\n self.login(email, password)\r\n\r\n # make sure we can access courseware immediately\r\n resp = self.client.get(reverse('dashboard'))\r\n self.assertEquals(resp.status_code, 200)\r\n\r\n # then wait a bit and see if we get timed out\r\n time.sleep(2)\r\n\r\n resp = self.client.get(reverse('dashboard'))\r\n\r\n # re-request, and we should get a redirect to login page\r\n self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL + '?next=' + reverse('dashboard'))", "def test_inactive_session_timeout(self):\r\n self.create_account(self.username, self.email, self.pw)\r\n self.activate_user(self.email)\r\n\r\n self.login(self.email, self.pw)\r\n\r\n # make sure we can access courseware immediately\r\n course_url = '/course/'\r\n resp = self.client.get_html(course_url)\r\n self.assertEquals(resp.status_code, 200)\r\n\r\n # then wait a bit and see if we get timed out\r\n time.sleep(2)\r\n\r\n resp = self.client.get_html(course_url)\r\n\r\n # re-request, and we should get a redirect to login page\r\n self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL + '?next=/course/')", "def test_timeout_not_exceeded():\n connection = FakeBaseConnection(session_timeout=10)\n start = time.time()\n assert not connection._timeout_exceeded(start)", "def testSessionTimeout(self):\n\n def testTimeout(res):\n self.failUnlessEqual(res.value.args[0], b'404')\n\n def testCBTimeout(res):\n # check for terminate if we expire\n terminate = res[0].getAttribute('type',False)\n self.failUnlessEqual(terminate, 'terminate')\n\n def sendTest():\n sd = self.send()\n sd.addCallback(testCBTimeout)\n sd.addErrback(testTimeout)\n return sd\n\n def testResend(res):\n self.failUnless(res[0].name=='body', 'Wrong element')\n s = self.b.service.sessions[self.sid]\n self.failUnless(s.inactivity==2,'Wrong inactivity value')\n self.failUnless(s.wait==2, 'Wrong wait value')\n return task.deferLater(reactor, s.wait+s.inactivity+1, sendTest)\n\n def testSessionCreate(res):\n self.failUnless(res[0].name=='body', 'Wrong element')\n self.failUnless(res[0].hasAttribute('sid'),'Not session id')\n self.sid = res[0]['sid']\n\n # send and wait\n sd = self.send()\n sd.addCallback(testResend)\n return sd\n\n\n\n BOSH_XML = \"\"\"<body content='text/xml; charset=utf-8'\n hold='1'\n rid='%(rid)i'\n to='localhost'\n route='xmpp:127.0.0.1:%(server_port)i'\n ver='1.6'\n wait='2'\n ack='1'\n inactivity='2'\n xml:lang='en'\n xmlns='http://jabber.org/protocol/httpbind'/>\n \"\"\"% { \"rid\": self.rid, \"server_port\": self.server_port }\n\n return self.proxy.connect(BOSH_XML).addCallbacks(testSessionCreate)", "def test_timeout_invalid_start():\n connection = FakeBaseConnection(session_timeout=10)\n assert not connection._timeout_exceeded(start=0)", "def test_session_timeout_without_abort(self, exp_factory):\n exp1 = exp_factory(sid=\"s1\", timeout=1)\n exp1.start()\n spec = SequentialSpec(\"a\", \"b\", nslots=1, name=\"test\")\n mm1 = MatchMaker(spec, exp=exp1)\n group1 = mm1.match_to(\"test\")\n\n assert group1.me.role == \"a\"\n assert group1.mm.quota.nopen == 0\n assert group1.mm.quota.npending == 1\n\n time.sleep(1)\n assert exp1.session_expired\n assert not exp1.aborted\n\n exp2 = exp_factory(sid=\"s2\")\n exp2.start()\n spec = SequentialSpec(\"a\", \"b\", nslots=1, name=\"test\")\n mm2 = MatchMaker(spec, exp=exp2)\n group2 = mm2.match_to(\"test\")\n\n assert group2.me.role == \"a\"\n assert not exp2.aborted\n\n exp3 = exp_factory(sid=\"s3\")\n exp3.start()\n spec = 
SequentialSpec(\"a\", \"b\", nslots=1, name=\"test\")\n mm3 = MatchMaker(spec, exp=exp3)\n mm3.match_to(\"test\")\n assert exp3.aborted", "def test_timeout_exceeded():\n connection = FakeBaseConnection(session_timeout=10)\n start = time.time() - 11\n try:\n connection._timeout_exceeded(start)\n except NetmikoTimeoutException as exc:\n assert isinstance(exc, NetmikoTimeoutException)\n return\n\n assert False", "def test_timeout(self):\n context = Context(SSLv23_METHOD)\n context.set_timeout(1234)\n assert context.get_timeout() == 1234", "async def test_validate_session(api_client: TestClient, coresys: CoreSys):\n with patch(\"aiohttp.web_request.BaseRequest.__getitem__\", return_value=None):\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": \"non-existing\"},\n )\n assert resp.status == 401\n\n with patch(\n \"aiohttp.web_request.BaseRequest.__getitem__\",\n return_value=coresys.homeassistant,\n ):\n resp = await api_client.post(\"/ingress/session\")\n result = await resp.json()\n\n assert \"session\" in result[\"data\"]\n session = result[\"data\"][\"session\"]\n assert session in coresys.ingress.sessions\n\n valid_time = coresys.ingress.sessions[session]\n\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": session},\n )\n assert resp.status == 200\n assert await resp.json() == {\"result\": \"ok\", \"data\": {}}\n\n assert coresys.ingress.sessions[session] > valid_time", "def test_wait_for_page_in_timeout(self):\n start_time = datetime.now()\n with self.assertRaises(SpdbError):\n csdb = CacheStateDB(self.config_data)\n ch = csdb.create_page_in_channel()\n\n csdb.wait_for_page_in([\"MY_TEST_KEY1\", \"MY_TEST_KEY2\"], ch, 1)\n\n assert (datetime.now() - start_time).seconds < 3", "def test_retriable_session():\n total = 5\n backoff_factor = 0.5\n session = retriable_session(total, backoff_factor)\n assert len(session.adapters) == 2\n assert 'https://' in session.adapters\n assert 'http://' in session.adapters\n assert session.adapters['https://'] == session.adapters['http://']\n assert session.adapters['https://'].max_retries.total == total\n assert session.adapters['https://'].max_retries.backoff_factor == backoff_factor", "def assert_timeout(self) -> None:", "def check_correct_usage(no_datastore, cookie_only_threshold):\n def minitest_divider(test):\n logger.debug('\\n\\n' + '-'*50)\n logger.debug(test + ' (nd=%s cot=%s)' % (no_datastore, cookie_only_threshold))\n\n st = SessionTester(no_datastore=no_datastore, cookie_only_threshold=cookie_only_threshold)\n expected_num_sessions_in_db_if_db_used = lambda a,b=0 : generic_expected_num_sessions_in_db_if_db_used(st, no_datastore, cookie_only_threshold, a, b)\n st.verify_active_sessions_in_db(0)\n\n minitest_divider('try doing nothing (no session should be started)')\n st.noop()\n st.verify_active_sessions_in_db(0)\n\n minitest_divider('start a session with a single write')\n st.start_request()\n str(st)\n assert st.get_expiration()==0, \"no session yet => no expiration yet\"\n assert st.is_active() is False\n st['x'] = 7\n assert st.is_active() is True\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(1)\n\n minitest_divider('start another session')\n st2 = SessionTester(st=st)\n st2.start_request()\n assert not st2.is_active()\n assert st2.get('x') is None, \"shouldn't get other session's data\"\n assert not st2.is_active(), \"still shouldn't be active - nothing set yet\"\n st2['x'] = 'st2x'\n assert st2.is_active()\n st2.finish_request_and_check()\n 
expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider('each session should get a unique sid')\n assert st2.ss.sid != st.ss.sid\n\n minitest_divider('we should still have the values we set earlier')\n st.start_request()\n str(st)\n assert_equal(st['x'], 7)\n st.finish_request_and_check()\n st2.start_request()\n assert_equal(st2['x'], 'st2x')\n st2.finish_request_and_check()\n\n minitest_divider(\"check get session by sid, save(True), and terminate()\")\n if cookie_only_threshold == 0:\n data1 = st.ss.data\n data2 = st2.ss.data\n else:\n # data is being stored in cookie-only form => won't be in the db\n data1 = data2 = {}\n resp = st.get_url('/get_by_sid?sid=%s' % st.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), data1)\n resp = st2.get_url('/get_by_sid?sid=%s' % st2.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), data2)\n expected_num_sessions_in_db_if_db_used(2)\n st.start_request()\n st['y'] = 9 # make the session dirty\n st.save(True) # force it to persist to the db even though it normally wouldn't\n st.finish_request_and_check()\n\n # now the data should be in the db\n resp = st.get_url('/get_by_sid?sid=%s' % st.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), st.ss.data)\n expected_num_sessions_in_db_if_db_used(2, 1)\n st.start_request()\n st.terminate() # remove it from the db\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(1)\n\n minitest_divider(\"should be able to terminate() and then start a new session all in one request\")\n st.start_request()\n st['y'] = 'yy'\n assert_equal(st.get('y'), 'yy')\n st.terminate()\n assert_raises(KeyError, st.__getitem__, 'y')\n st['x'] = 7\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"regenerating SID test\")\n initial_sid = st.ss.sid\n st.start_request()\n initial_expir = st.get_expiration()\n st.regenerate_id()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n assert_not_equal(initial_sid, st.ss.sid, \"regenerated sid should be different\")\n assert_equal(initial_expir, st._get_expiration(), \"expiration should not change\")\n st.start_request()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"regenerating SID test w/new expiration time\")\n initial_sid = st.ss.sid\n st.start_request()\n initial_expir = st.get_expiration()\n new_expir = initial_expir + 120 # something new\n st.regenerate_id(expiration_ts=new_expir)\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n assert_not_equal(initial_sid, st.ss.sid, \"regenerated sid should be different\")\n assert_equal(new_expir, st._get_expiration(), \"expiration should be what we asked for\")\n st.start_request()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"check basic dictionary operations\")\n st.start_request()\n st['s'] = 'aaa'\n st['i'] = 99\n st['f'] = 4.37\n assert_equal(st.pop('s'), 'aaa')\n assert_equal(st.pop('s'), None)\n assert_equal(st.pop('s', 'nil'), 'nil')\n assert st.has_key('i')\n assert not st.has_key('s')\n assert_equal(st.get('i'), 99)\n assert_equal(st.get('ii'), None)\n assert_equal(st.get('iii', 3), 3)\n assert_equal(st.get('f'), st['f'])\n del st['f']\n assert_raises(KeyError, st.__getitem__, 'f')\n assert 'f' not in st\n assert 'i' in st\n 
assert_equal(st.get('x'), 7)\n st.clear()\n assert 'i' not in st\n assert 'x' not in st\n st.finish_request_and_check()\n\n minitest_divider(\"add complex data (models and objects) to the session\")\n st.start_request()\n st['model'] = make_entity(0)\n st['dict'] = dict(a='alpha', c='charlie', e='echo')\n st['list'] = ['b', 'd', 'f']\n st['set'] = set([2, 3, 5, 7, 11, 13, 17, 19])\n st['tuple'] = (7, 7, 1985)\n st.finish_request_and_check()\n st.start_request()\n st.clear()\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: basic usage\")\n st.start_request()\n st.set_quick('msg', 'mc only!')\n assert_equal('mc only!', st['msg'])\n st.finish_request_and_check()\n st.start_request()\n assert_equal('mc only!', st.pop_quick('msg'))\n assert_raises(KeyError, st.__getitem__, 'msg')\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: flush memcache (value will be lost if not using cookies)\")\n st.start_request()\n st.set_quick('a', 1)\n st.set_quick('b', 2)\n st.finish_request_and_check()\n st.flush_memcache()\n st.start_request()\n if cookie_only_threshold > 0:\n assert_equal(st['a'], 1)\n assert_equal(st['b'], 2)\n else:\n assert_raises(KeyError, st.__getitem__, 'a')\n assert_raises(KeyError, st.__getitem__, 'b')\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: flush memcache should have no impact if another mutator is also used (and this ISNT memcache-only)\")\n st.start_request()\n st['x'] = 24\n st.set_quick('a', 1)\n st.finish_request_and_check()\n st.flush_memcache()\n st.start_request()\n if no_datastore and cookie_only_threshold == 0:\n assert_raises(KeyError, st.__getitem__, 'a')\n assert_raises(KeyError, st.__getitem__, 'x')\n else:\n assert_equal(st['a'], 1)\n assert_equal(st['x'], 24)\n st.set_quick('msg', 'hello')\n st['z'] = 99\n st.finish_request_and_check()", "def sessiontimeout(self) :\n\t\ttry :\n\t\t\treturn self._sessiontimeout\n\t\texcept Exception as e:\n\t\t\traise e", "def test_timeout(self):\n # Uses a mocked version of EmailActivationTokenGenerator\n # so we can change the value of 'today'\n class Mocked(EmailActivationTokenGenerator):\n def __init__(self, today):\n self._today_val = today\n\n def _today(self):\n return self._today_val\n\n user = self.create_user()\n token_generator = EmailActivationTokenGenerator()\n token = token_generator.make_token(user)\n\n p1 = Mocked(date.today() + timedelta(settings.USERS_EMAIL_CONFIRMATION_TIMEOUT_DAYS))\n self.assertTrue(p1.check_token(user, token))\n\n p2 = Mocked(date.today() + timedelta(settings.USERS_EMAIL_CONFIRMATION_TIMEOUT_DAYS + 1))\n self.assertFalse(p2.check_token(user, token))", "def test_max_cookie_length(self):\n storage = self.get_storage()\n response = self.get_response()\n\n for i in range(5):\n storage.add(str(i) * 900)\n unstored_messages = storage.update(response)\n\n cookie_storing = self.stored_messages_count(storage, response)\n self.assertEqual(cookie_storing, 4)\n\n self.assertEqual(len(unstored_messages), 1)\n self.assert_(unstored_messages[0].message == '0' * 900)", "def test_pool_timeout_hw(self):\n self.test_pool_timeout()", "def test_api_livesession_video_no_stopped_at_cache_has_timeout(\n self,\n ):\n # set the start at current time minus 30 seconds\n started = int(to_timestamp(timezone.now())) - 30\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\"started_at\": str(started)},\n live_type=JITSI,\n )\n\n livesession = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"[email 
protected]\",\n live_attendance={started + 10: {\"onStage\": 0}, started + 20: {\"muted\": 0}},\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n livesession.refresh_from_db()\n with self.assertNumQueries(3):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n response_json = {\n \"count\": 1,\n \"next\": None,\n \"previous\": None,\n \"results\": [\n {\n \"id\": str(livesession.id),\n \"display_name\": \"[email protected]\",\n \"is_registered\": False,\n \"live_attendance\": {\n str(started): {},\n str(started + 15): {\"onStage\": 0},\n str(started + 30): {\"muted\": 0},\n },\n }\n ],\n }\n self.assertEqual(response.json(), response_json)\n\n with self.assertNumQueries(0):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), response_json)\n\n # go over the cache limit\n new_time = timezone.now() + timedelta(\n settings.VIDEO_ATTENDANCES_CACHE_DURATION + 1\n )\n with mock.patch.object(\n timezone, \"now\", return_value=new_time\n ), mock.patch.object(time, \"time\", return_value=int(to_timestamp(new_time))):\n # we call again the same request,\n # results are not identical\n with self.assertNumQueries(3):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.json(), response_json)", "def test_serverTimesOut(self):\n c = Clock()\n self.server.callLater = c.callLater\n\n def login():\n return self.client.login(b'testuser', b'password-test')\n\n def expireTime():\n c.advance(self.server.POSTAUTH_TIMEOUT * 2)\n\n d = self.connected.addCallback(strip(login))\n d.addCallback(strip(expireTime))\n\n # The loopback method's Deferred fires the connection is\n # closed, and the server closes the connection as a result of\n # expireTime.\n return defer.gatherResults([d, self.loopback()])", "def test_polling_plugin_timeout(self):\n pass", "def test_getallsessions_reliability(self):\n for _ in range(100):\n sessions = AudioUtilities.GetAllSessions()\n assert len(sessions) > 0", "def test_expires(self):\n storage = SessionStorage(timeout=0.001)\n session1 = storage['key']\n session1['value'] = 'example'\n session2 = storage['key']\n self.assertEquals('example', session2['value'])\n\n time.sleep(0.001)\n\n session3 = storage['key']\n self.assertNotIn('value', session3)", "def test_aio_can_login_to_web_portal(aio):", "def TODO_testTimeout(self):\n return \"\"\"TODO: Highly dependent on hardcoded downstream timeout val\"\"\"\n\n # Assuming proxy's downstream_max is 1,\n # and number of threads is 1.\n\n self.client_connect(0)\n\n self.client_send('get time0\\r\\n', 0)\n self.mock_recv('get time0\\r\\n', 0)\n\n # Mock server is 'busy' at this point, so\n # downstream timeout logic should kick in,\n # without our mock server having to send anything.\n\n self.wait(210)\n\n self.client_recv('END\\r\\n', 0)\n\n # TODO: The number of server sessions should be 0,\n # except the close might not have propagated.", "def test_expired_pipeline(self):\n data = self.data()\n # 
provider is sent along request when request is made from mobile application\n data.pop(\"provider\")\n # to identify that request is made using browser\n data.update({\"social_auth_provider\": \"Google\"})\n response = self.client.post(self.url, data)\n self._assert_third_party_session_expired_error(\n response,\n \"Registration using {provider} has timed out.\".format(provider=\"Google\")\n )\n self._verify_user_existence(user_exists=False, social_link_exists=False)", "def test_timeout(self):\n session_id = self._open_session()\n\n # No alert to begin with\n alerts = HostContactAlert.filter_by_item(self.host)\n self.assertEqual(alerts.count(), 0)\n\n time.sleep(HostState.CONTACT_TIMEOUT + HostStatePoller.POLL_INTERVAL + RABBITMQ_GRACE_PERIOD)\n\n # Should be one SESSION_TERMINATE message to AMQP with a matching session ID\n message = self._receive_one_amqp()\n self.assertDictEqual(\n message,\n {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"SESSION_TERMINATE\",\n \"plugin\": self.PLUGIN,\n \"session_seq\": None,\n \"session_id\": session_id,\n \"body\": None,\n },\n )\n\n alerts = HostContactAlert.filter_by_item(self.host)\n self.assertEqual(alerts.count(), 1)\n\n # Should be a message waiting for the agent telling it that its session was terminated\n # (timing out doesn't mean the agent is gone, it could just be experiencing network difficulties)\n # What's more, the agent doesn't necessarily *know* that it had network difficulties, e.g. if it\n # just got real slow and waited too long between GETs.\n # This has to cut both ways to be reliable:\n # * We have to tell the agent that we thought it went away, by sending a TERMINATE for sessions\n # * If the agent finds that a GET fails then it has to assume that we might have put session\n # messages in that GET, and terminate all its sessions in case one of those GET messages\n # was really a TERMINATE\n response = self._get()\n self.assertResponseOk(response)\n forwarded_messages = response.json()[\"messages\"]\n self.assertEqual(len(forwarded_messages), 1)\n self.assertDictEqual(\n forwarded_messages[0],\n {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"SESSION_TERMINATE\",\n \"plugin\": self.PLUGIN,\n \"session_seq\": None,\n \"session_id\": None,\n \"body\": None,\n },\n )", "def onLoginTimeOut(self):\r\n\r\n self.pros +=1\r\n self.pb_load.setValue(self.pros * 4)\r\n # login timeout error\r\n if(self.pros == 25):\r\n self.check_timer.stop()", "def test_timeout(self):\n # Attempt connection with short timeout\n with self.assertRaises(requests.exceptions.ReadTimeout):\n a = api.InvenTreeAPI(SERVER, username=USERNAME, password=PASSWORD, timeout=0.001) # noqa: F841", "def test_set_session():", "def get_test_timeout(self):\n return None", "def test_twice_logging_in(test_client, test_session):\n tokens = []\n for _ in range(2):\n with patch(\"validators.authentication.session\", test_session):\n with patch(\"views.login.session\", test_session):\n payload = {\"username\": \"testuser1\", \"password\": \"Qwerty123_\"}\n response = test_client.post(\"api/v1/login\", data=payload)\n assert response.status_code == 200\n tokens.append(response.json()[\"access_token\"])\n time.sleep(1)\n assert tokens[0] != tokens[1]", "def testEnsurePlaybacksAreLimited(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasUnlimitedPlaybacks)", "def test_serverTimeout(self):\n c = Clock()\n self.server.timeoutTest = True\n self.client.timeout = 5 #seconds\n self.client.callLater = c.callLater\n self.selectedArgs = None\n\n def login():\n d = 
self.client.login(b'testuser', b'password-test')\n c.advance(5)\n d.addErrback(timedOut)\n return d\n\n def timedOut(failure):\n self._cbStopClient(None)\n failure.trap(error.TimeoutError)\n\n d = self.connected.addCallback(strip(login))\n d.addErrback(self._ebGeneral)\n return defer.gatherResults([d, self.loopback()])", "def test_api_livesession_video_ended_cache_no_timeout(\n self,\n ):\n started = int(to_timestamp(timezone.now())) - 1000\n\n video = VideoFactory(\n live_state=STOPPED,\n live_info={\"started_at\": str(started), \"stopped_at\": str(started + 30)},\n live_type=JITSI,\n )\n\n livesession = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"[email protected]\",\n live_attendance={started + 10: {\"onStage\": 0}, started + 20: {\"muted\": 0}},\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n livesession.refresh_from_db()\n with self.assertNumQueries(3):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n response_json = {\n \"count\": 1,\n \"next\": None,\n \"previous\": None,\n \"results\": [\n {\n \"id\": str(livesession.id),\n \"display_name\": \"[email protected]\",\n \"is_registered\": False,\n \"live_attendance\": {\n str(started): {},\n str(started + 15): {\"onStage\": 0},\n str(started + 30): {\"muted\": 0},\n },\n }\n ],\n }\n self.assertEqual(response.json(), response_json)\n\n # go over the cache limit\n new_time = timezone.now() + timedelta(\n settings.VIDEO_ATTENDANCES_CACHE_DURATION + 1\n )\n with mock.patch.object(\n timezone, \"now\", return_value=new_time\n ), mock.patch.object(time, \"time\", return_value=int(to_timestamp(new_time))):\n # cache has no timeout\n with self.assertNumQueries(0):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), response_json)", "def get_timeout(self) -> int:", "def sessiontimeout(self, sessiontimeout) :\n\t\ttry :\n\t\t\tself._sessiontimeout = sessiontimeout\n\t\texcept Exception as e:\n\t\t\traise e", "def test_persistent_timeout(self):\n \n # Send the request\n request = webhttp.message.Request()\n request.method = \"GET\"\n request.uri = \"/test/index.html\"\n request.set_header(\"Host\", \"localhost:{}\".format(portnr))\n request.set_header(\"Connection\", \"keep-alive\")#Not even necessary, same effect as nothing in the rfc\n self.client_socket.send(str(request).encode())\n\n\n # Remove the response from the buffer\n message = self.client_socket.recv(1024)\n\n # Test if the connection is still alive\n self.client_socket.send(str(request).encode())\n message = self.client_socket.recv(1024)\n self.assertTrue(message)\n\n #Wait\n time.sleep(25)\n\n\n # Test if the connection is still alive\n self.client_socket.send(str(request).encode())\n message = self.client_socket.recv(1024)\n self.assertFalse(message)\n\n #Restart connection, just to prevent tearDown from throwing an exception\n self.setUp()", "def on_timeout(self):\n self.logger.debug('id=%d, Session timed out!', self.id)\n self.close(SessionCloseErrorCode.SESSION_DIED)", "def _check_timeouts(self):\n\n expired_tokens = []\n for 
token in self._capability_timeouts:\n interval = datetime.utcnow() - self._capability_timeouts[token]\n if interval.total_seconds() >= 10:\n expired_tokens.append(token)\n\n for token in expired_tokens:\n cap_withdraw = mplane.model.Withdrawal(capability=self._capabilities[token])\n self.handle_message(cap_withdraw, self.identity_for(token))", "def check_total_sessions_exceeded(self) -> None:\n if self.project.sessions_total is None:\n # non-failure, Session is not blocked from being created due to total Sessions being exceeded as there is no\n # limit\n return\n\n total_session_count = self.get_total_session_count()\n if self.project.sessions_total <= total_session_count:\n raise SessionException(\n \"Unable to create new sessions for the project. {}/{} have already been created.\".format(\n total_session_count, self.project.sessions_total\n )\n )", "def ds_token_ok(buffer_min=10):\n required = {'ds_expiration', 'ds_access_token', 'ds_account_id'}\n session_keys = set(session.keys())\n ok = session_keys.intersection(required) == required\n if ok:\n token_expiration = session.get(\"ds_expiration\")\n buffer_starts = token_expiration - timedelta(minutes=buffer_min)\n ok = ok and buffer_starts > pytz.utc.localize(datetime.utcnow())\n return ok", "def test_sessions():\n CHECKS = (check_correct_usage, check_expiration, check_bad_cookie, check_various_session_sizes)\n for no_datastore in (False, True):\n if no_datastore:\n test_db = 'without'\n else:\n test_db = 'with'\n for cot in (0, 10*1024, 2**30):\n if cot == 0:\n test_cookie = 'no data stored in cookies'\n elif cot == 2**30:\n test_cookie = 'data only stored in cookies'\n else:\n test_cookie = 'store data in cookies when its encoded size<=%dB' % cot\n for check in CHECKS:\n logger.debug('\\n\\n' + '*'*50)\n logger.debug('Running %s %s datastore and %s' % (check.__name__, test_db, test_cookie))\n yield check, no_datastore, cot", "async def test_validate_session_with_user_id(\n api_client: TestClient, coresys: CoreSys, ha_ws_client: AsyncMock\n):\n with patch(\"aiohttp.web_request.BaseRequest.__getitem__\", return_value=None):\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": \"non-existing\"},\n )\n assert resp.status == 401\n\n with patch(\n \"aiohttp.web_request.BaseRequest.__getitem__\",\n return_value=coresys.homeassistant,\n ):\n ha_ws_client.async_send_command.return_value = [\n {\"id\": \"some-id\", \"name\": \"Some Name\", \"username\": \"sn\"}\n ]\n\n resp = await api_client.post(\"/ingress/session\", json={\"user_id\": \"some-id\"})\n result = await resp.json()\n\n assert {\"type\": \"config/auth/list\"} in [\n call.args[0] for call in ha_ws_client.async_send_command.call_args_list\n ]\n\n assert \"session\" in result[\"data\"]\n session = result[\"data\"][\"session\"]\n assert session in coresys.ingress.sessions\n\n valid_time = coresys.ingress.sessions[session]\n\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": session},\n )\n assert resp.status == 200\n assert await resp.json() == {\"result\": \"ok\", \"data\": {}}\n\n assert coresys.ingress.sessions[session] > valid_time\n\n assert session in coresys.ingress.sessions_data\n assert coresys.ingress.get_session_data(session).user.id == \"some-id\"\n assert coresys.ingress.get_session_data(session).user.username == \"sn\"\n assert (\n coresys.ingress.get_session_data(session).user.display_name == \"Some Name\"\n )", "def test_lock_timeout():\n lock_unlock_timeout(0)", "def change_session_timeout(duthost1, 
duthost2, keep_and_peer_link_member):\n cmd = 'config mclag session-timeout {} {}'\n keep_alive_interface = keep_and_peer_link_member[duthost1.hostname]['keepalive']\n duthost1.shell(cmd.format(MCLAG_DOMAINE_ID, NEW_SESSION_TIMEOUT))\n duthost2.shell(cmd.format(MCLAG_DOMAINE_ID, NEW_SESSION_TIMEOUT))\n duthost1.shutdown(keep_alive_interface)\n\n yield\n\n duthost1.shell(cmd.format(MCLAG_DOMAINE_ID, DEFAULT_SESSION_TIMEOUT))\n duthost2.shell(cmd.format(MCLAG_DOMAINE_ID, DEFAULT_SESSION_TIMEOUT))\n duthost1.no_shutdown(keep_alive_interface)", "def check_session(session_id, timestamp, elapse_limit=600):\n the_user = col_user.find_one({\"session_key.session_id\": session_id})\n if not the_user:\n return False\n\n the_timestamp = the_user[\"session_key\"].get(\"timestamp\")\n current_timestamp = convert_to_bson_timestamp(timestamp)\n\n elapsed = current_timestamp.time - the_timestamp.time\n if elapsed >= elapse_limit:\n return False\n return the_user", "def is_session_valid(self, logonTimestamp):\n time_diff = time.time() - logonTimestamp\n return (time_diff / 60) < self.session_time_limit", "def test_only_last_token_is_valid(test_client, test_session):\n tokens = []\n for _ in range(randint(2, 10)):\n with patch(\"validators.authentication.session\", test_session):\n with patch(\"views.login.session\", test_session):\n payload = {\"username\": \"testuser1\", \"password\": \"Qwerty123_\"}\n response1 = test_client.post(\"api/v1/login\", data=payload)\n tokens.append(response1.json()[\"access_token\"])\n time.sleep(1)\n\n status_codes = []\n for token in tokens:\n with patch(\"validators.authentication.session\", test_session):\n with patch(\"views.items.session\", test_session):\n headers = {\"Authorization\": f\"Bearer {token}\"}\n response = test_client.get(\"/api/v1/items\", headers=headers)\n status_codes.append(response.status_code)\n last_code = status_codes.pop()\n assert last_code == 200\n assert set(status_codes) == {401}", "def test__API_with_wrong_answer(self):\n self.mock_connection.state = MockConnection.WRONG_NUM_OF_CONFIRMATIONS\n\n # timeout supposed to be here\n self.assertEqual(self.mutex.lock(), False) # acquire mutex", "def test_timeout_setting(self):\n self.assertEqual(self.es.sse_kwargs.get('timeout'),\n config.socket_timeout)", "def test_max_workers(self):\r\n from concurrent.futures import ThreadPoolExecutor\r\n session = FuturesSession()\r\n self.assertEqual(session.executor._max_workers, 2)\r\n session = FuturesSession(max_workers=5)\r\n self.assertEqual(session.executor._max_workers, 5)\r\n session = FuturesSession(executor=ThreadPoolExecutor(max_workers=10))\r\n self.assertEqual(session.executor._max_workers, 10)\r\n session = FuturesSession(executor=ThreadPoolExecutor(max_workers=10),\r\n max_workers=5)\r\n self.assertEqual(session.executor._max_workers, 10)", "def test_server_timeout():\r\n with throttle_client(b\"[semaphores]\\nA=1\") as client:\r\n one = client.new_peer(expires_in=timedelta(minutes=1))\r\n two = client.new_peer(expires_in=timedelta(minutes=1))\r\n # Acquire first lease\r\n _ = client.acquire(one, \"A\")\r\n\r\n # Wait for two in a seperate thread so we do not block forever if this test\r\n # fails.\r\n def wait_for_two():\r\n client.acquire(two, \"A\", block_for=timedelta(seconds=0.5))\r\n\r\n t = Thread(target=wait_for_two)\r\n t.start()\r\n\r\n # Three seconds should be ample time for `t` to return\r\n t.join(3)\r\n # If `t` is alive, the join timed out, which should not be the case\r\n assert not t.is_alive()", "def test_timeout_kwarg():\n\n 
testutil.add_response(\"login_response_200\")\n testutil.add_response(\"query_response_200\")\n testutil.add_response(\"logout_response_200\")\n\n client_args = {\n \"username\": testutil.username,\n \"password\": testutil.password,\n \"client_id\": testutil.client_id,\n \"client_secret\": testutil.client_secret,\n \"version\": \"37.0\",\n \"timeout\": \"10\"}\n\n with sfdc.client(**client_args) as client:\n qr = client.query(\"SELECT Id, Name FROM Account LIMIT 10\")\n assert qr[1].timeout == 10.0, 'Timeout value in request is different to client kwarg value'", "def test_expired_credentials():\n pass", "def check_session_queue_full(self) -> None:\n if (\n self.project.sessions_queued is None\n ): # no limit set so always return (success)\n return\n\n queued_request_count = self.project.session_requests.count()\n if queued_request_count >= self.project.sessions_queued:\n raise SessionException(\n \"There are already {}/{} requests for sessions for this project.\".format(\n queued_request_count, self.project.sessions_queued\n )\n )", "async def test_expired_session(aresponses, create_session_response):\n aresponses.add(\n \"production.tile-api.com\",\n f\"/api/v1/clients/{TILE_CLIENT_UUID}\",\n \"put\",\n aresponses.Response(\n text=load_fixture(\"create_client_response.json\"),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n aresponses.add(\n \"production.tile-api.com\",\n f\"/api/v1/clients/{TILE_CLIENT_UUID}/sessions\",\n \"post\",\n aresponses.Response(\n text=json.dumps(create_session_response),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n aresponses.add(\n \"production.tile-api.com\",\n f\"/api/v1/clients/{TILE_CLIENT_UUID}\",\n \"put\",\n aresponses.Response(\n text=load_fixture(\"create_client_response.json\"),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n aresponses.add(\n \"production.tile-api.com\",\n f\"/api/v1/clients/{TILE_CLIENT_UUID}/sessions\",\n \"post\",\n aresponses.Response(\n text=json.dumps(create_session_response),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n aresponses.add(\n \"production.tile-api.com\",\n \"/api/v1/tiles/tile_states\",\n \"get\",\n aresponses.Response(\n text=load_fixture(\"tile_states_response.json\"),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n aresponses.add(\n \"production.tile-api.com\",\n f\"/api/v1/tiles/{TILE_TILE_UUID}\",\n \"get\",\n aresponses.Response(\n text=load_fixture(\"tile_details_response.json\"),\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n ),\n )\n\n async with aiohttp.ClientSession() as session:\n api = await async_login(\n TILE_EMAIL, TILE_PASSWORD, session, client_uuid=TILE_CLIENT_UUID\n )\n\n # Simulate an expired session:\n api._session_expiry = int(time() * 1000) - 1000000\n await api.async_get_tiles()", "def test_im_chat_sessions(self):\n pass", "def test_access_token_in_session_after_login(self, client, valid_otp_data):\n\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 200\n\n session_resp = client.get(\"/view_session\")\n assert \"access_token\" in session_resp.json()", "def test_max_used_thread_token_is_valid(self):\n self.token.use_count = const.MAX_TOKEN_USE_COUNT\n assert not self.token.is_valid()", "def test_auth(self):\n self.api.auth()\n self.assertIsNotNone(self.api.session, msg=\"auth() doesn't return a session\")\n\n self.api.request('logout')\n self.assertIn('list', 
self.api.request('sys.settings.get').data,\n msg=\"auth() doesn't restore sessions expired\")", "def test_spa_get_bad_token(app, client, get_message):\n with capture_flashes() as flashes:\n with capture_passwordless_login_requests() as requests:\n response = client.post(\n \"/login\",\n json=dict(email=\"[email protected]\"),\n headers={\"Content-Type\": \"application/json\"},\n )\n assert response.headers[\"Content-Type\"] == \"application/json\"\n token = requests[0][\"login_token\"]\n time.sleep(1)\n\n response = client.get(\"/login/\" + token)\n assert response.status_code == 302\n split = urlsplit(response.headers[\"Location\"])\n assert \"localhost:8081\" == split.netloc\n assert \"/login-error\" == split.path\n qparams = dict(parse_qsl(split.query))\n assert all(k in qparams for k in [\"email\", \"error\", \"identity\"])\n\n msg = get_message(\"LOGIN_EXPIRED\", within=\"1 milliseconds\", email=\"[email protected]\")\n assert msg == qparams[\"error\"].encode(\"utf-8\")\n\n # Test mangled token\n token = (\n \"WyIxNjQ2MzYiLCIxMzQ1YzBlZmVhM2VhZjYwODgwMDhhZGU2YzU0MzZjMiJd.\"\n \"BZEw_Q.lQyo3npdPZtcJ_sNHVHP103syjM\"\n \"&url_id=fbb89a8328e58c181ea7d064c2987874bc54a23d\"\n )\n response = client.get(\"/login/\" + token)\n assert response.status_code == 302\n split = urlsplit(response.headers[\"Location\"])\n assert \"localhost:8081\" == split.netloc\n assert \"/login-error\" == split.path\n qparams = dict(parse_qsl(split.query))\n assert len(qparams) == 1\n assert all(k in qparams for k in [\"error\"])\n\n msg = get_message(\"INVALID_LOGIN_TOKEN\")\n assert msg == qparams[\"error\"].encode(\"utf-8\")\n assert len(flashes) == 0", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "async def test_find_link_tag_max_size(hass: HomeAssistant, mock_session) -> None:\n text = \"\".join(\n [\n '<link rel=\"redirect_uri\" href=\"/wine\">',\n (\"0\" * 1024 * 10),\n '<link rel=\"redirect_uri\" href=\"/beer\">',\n ]\n )\n mock_session.get(\"http://127.0.0.1:8000\", text=text)\n redirect_uris = await indieauth.fetch_redirect_uris(hass, \"http://127.0.0.1:8000\")\n\n assert redirect_uris == [\"http://127.0.0.1:8000/wine\"]", "def timedOut(self):\n return self.result() == TIMEOUT", "def time_limit(self):\n return 2503", "def test_passive(self):\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\n action = ''\n set_email = ''\n set_admin = False\n continue_url = '/my/fancy/url'\n\n # Continue URL.\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, content_type = self._run_test(\n host, path_info, cookie_dict, action, set_email, set_admin,\n continue_url)\n self.assertEqual(302, status)\n self.assertFalse(set_cookie)\n self.assertEqual('text/html; charset=utf-8', content_type)\n self.assertIsInstance(content_type, str)", "def check_session(col_user, session_id, timestamp, elapse_limit=60):\n the_user = col_user.find_one({\"session_key.session_id\": session_id})\n if not the_user:\n return False\n\n the_timestamp = the_user[\"session_key\"].get(\"timestamp\")\n current_timestamp = convert_to_bson_timestamp(timestamp)\n\n elapsed = current_timestamp.time - the_timestamp.time\n if elapsed >= elapse_limit:\n return False\n return the_user", "async def tus_check_session(request: web.Request) -> web.Response:\n ctx: Context = request.app[\"ctx\"]\n secret = ctx.local_config[\"storage-proxy\"][\"secret\"]\n async with check_params(\n request,\n t.Dict(\n {\n t.Key(\"token\"): 
tx.JsonWebToken(\n secret=secret, inner_iv=upload_token_data_iv\n ),\n }\n ),\n read_from=CheckParamSource.QUERY,\n ) as params:\n token_data = params[\"token\"]\n async with ctx.get_volume(token_data[\"volume\"]) as volume:\n headers = await prepare_tus_session_headers(request, token_data, volume)\n return web.Response(headers=headers)", "def test_kafka_group_io_dataset_invalid_stream_timeout():\n\n STREAM_TIMEOUT = -20\n try:\n tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgteststreaminvalid\",\n servers=\"localhost:9092\",\n stream_timeout=STREAM_TIMEOUT,\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n except ValueError as e:\n assert str(\n e\n ) == \"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\".format(\n STREAM_TIMEOUT\n )", "async def test_wait_for_activity_timeout(aiopg_connector):\n pg_app = app.App(connector=aiopg_connector)\n worker = worker_module.Worker(app=pg_app, timeout=2)\n worker.notify_event = asyncio.Event()\n task = asyncio.ensure_future(worker.single_worker(worker_id=0))\n try:\n await asyncio.sleep(0.2) # should be enough so that we're waiting\n\n worker.stop_requested = True\n\n with pytest.raises(asyncio.TimeoutError):\n await asyncio.wait_for(task, timeout=0.2)\n finally:\n worker.notify_event.set()", "def test_exceed_limit_request(self):\n actions.login(ADMIN_EMAIL)\n ids_list = list(range(SkillAggregateRestHandler.MAX_REQUEST_SIZE))\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': ids_list}, True))\n\n response = transforms.loads(self.get(get_url).body)\n self.assertEqual(412, response['status'])", "def org_apache_felix_http_session_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_session_timeout", "def test_timeout(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"test_timeout\")", "def test_timeout(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"test_timeout\")", "def test_session_promotion(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 0)\")\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "def testVerifyArbitraryLimits(self):\n\t\tpolicy = MinimumPlaybackPolicy(3)\n\t\tfor x in range(0, 3):\n\t\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\t\tself.failIf(policy.isReadyForRemoval)\n\t\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)", "def test_session_not_accessed(self):\n response = self.client.get(\"/auth_processor_no_attr_access/\")\n self.assertContains(response, \"Session not accessed\")", "def test_gatt_request_max_mtu(self):\n gatt_server_cb = self.per_ad.droid.gattServerCreateGattServerCallback()\n gatt_server = self.per_ad.droid.gattServerOpenGattServer(\n gatt_server_cb)\n self.gatt_server_list.append(gatt_server)\n try:\n bluetooth_gatt, gatt_callback, adv_callback = (\n orchestrate_gatt_connection(self.cen_ad, self.per_ad))\n self.bluetooth_gatt_list.append(bluetooth_gatt)\n except GattTestUtilsError as err:\n self.log.error(err)\n return False\n 
self.adv_instances.append(adv_callback)\n expected_mtu = gatt_mtu_size['max']\n self.cen_ad.droid.gattClientRequestMtu(bluetooth_gatt, expected_mtu)\n if not self._verify_mtu_changed_on_client_and_server(\n expected_mtu, gatt_callback, gatt_server_cb):\n return False\n return self._orchestrate_gatt_disconnection(bluetooth_gatt,\n gatt_callback)", "def auth_token_provider_so_timeout(self, auth_token_provider_so_timeout):\n\n self._auth_token_provider_so_timeout = auth_token_provider_so_timeout", "def test_swact_attempt_timeout(self):\n\n # mock the get_host queries\n # all remaining queries, the host returns 'Controller-Standby'\n self.sysinv_client.get_host.side_effect = itertools.chain(\n itertools.repeat(self.CONTROLLER_STANDBY))\n\n # mock the API call as successful on the subcloud\n self.sysinv_client.swact_host.return_value = self.CONTROLLER_SWACTING\n\n # invoke the strategy state operation on the orch thread\n self.worker.perform_state_action(self.strategy_step)\n\n # verify the swact command was actually attempted\n self.sysinv_client.swact_host.assert_called()\n\n # verify the query was invoked: 1 + max_attempts times\n self.assertEqual(swact_host.DEFAULT_MAX_QUERIES + 2,\n self.sysinv_client.get_host.call_count)\n\n # verify that state failed due to subcloud never finishing the swact\n self.assert_step_updated(self.strategy_step.subcloud_id,\n consts.STRATEGY_STATE_FAILED)", "def check_timeout(flag: Callable, limit: float) -> bool:\n timed_out = False\n if HAS_SUPERVISOR:\n start = supervisor.ticks_ms()\n while not timed_out and not flag():\n if ticks_diff(supervisor.ticks_ms(), start) >= limit * 1000:\n timed_out = True\n else:\n start = time.monotonic()\n while not timed_out and not flag():\n if time.monotonic() - start >= limit:\n timed_out = True\n return timed_out", "def test_timeout(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"test_timeout\")", "def test_timeout(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"test_timeout\")", "def test_new_session_promotion(self):\r\n cursor = self.db.cursor()\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "async def test_cdpsession_non_json_response(cdp):\n with cdp.method_subscription([\"finished\"]) as queue:\n await cdp.send(\"\", await_response=False)\n try:\n await asyncio.wait_for(queue.get(), timeout=5)\n except asyncio.TimeoutError:\n assert not cdp.listening_stopped.is_set()", "def test_api_livesession_read_token_lti_admin_instruct_token_email_none(self):\n # livesession with consumer_site\n livesession = LiveSessionFactory(\n is_registered=True,\n is_from_lti_connection=True,\n email=\"[email protected]\", # explicit to be found in response\n video__playlist__lti_id=\"Maths\", # explicit to be found in response\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\", # explicit to be found in response\n )\n # token with right context_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=livesession.video.playlist,\n context_id=str(livesession.video.playlist.lti_id),\n consumer_site=str(livesession.consumer_site.id),\n user__email=None,\n 
user__id=f\"{livesession.lti_user_id}_diff\",\n )\n\n response = self.client.get(\n self._get_url(livesession.video, livesession),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json(),\n {\n \"anonymous_id\": None,\n \"consumer_site\": str(livesession.video.playlist.consumer_site.id),\n \"display_name\": None,\n \"email\": \"[email protected]\",\n \"id\": str(livesession.id),\n \"is_registered\": True,\n \"language\": \"en\",\n \"live_attendance\": None,\n \"lti_user_id\": \"56255f3807599c377bf0e5bf072359fd\",\n \"lti_id\": \"Maths\",\n \"should_send_reminders\": True,\n \"username\": None,\n \"video\": str(livesession.video.id),\n },\n )", "def test_team_size_limit():\n app = create_ctfd(user_mode=\"teams\")\n app.config.update(\n {\n \"OAUTH_CLIENT_ID\": \"ctfd_testing_client_id\",\n \"OAUTH_CLIENT_SECRET\": \"ctfd_testing_client_secret\",\n \"OAUTH_AUTHORIZATION_ENDPOINT\": \"http://auth.localhost/oauth/authorize\",\n \"OAUTH_TOKEN_ENDPOINT\": \"http://auth.localhost/oauth/token\",\n \"OAUTH_API_ENDPOINT\": \"http://api.localhost/user\",\n }\n )\n with app.app_context():\n set_config(\"team_size\", 1)\n team = gen_team(app.db, member_count=1, oauth_id=1234)\n team_id = team.id\n login_with_mlc(\n app, team_name=\"team_name\", team_oauth_id=1234, raise_for_error=False\n )\n assert len(Teams.query.filter_by(id=team_id).first().members) == 1\n\n set_config(\"team_size\", 2)\n login_with_mlc(app, team_name=\"team_name\", team_oauth_id=1234)\n assert len(Teams.query.filter_by(id=team_id).first().members) == 2\n destroy_ctfd(app)", "def test_next_token(self) -> None:\n\n # `next_token` does not appear\n # Number of results is the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=20\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n self.assertNotIn(\"next_token\", channel.json_body)\n\n # `next_token` does not appear\n # Number of max results is larger than the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=21\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n self.assertNotIn(\"next_token\", channel.json_body)\n\n # `next_token` does appear\n # Number of max results is smaller than the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=19\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 19)\n self.assertEqual(channel.json_body[\"next_token\"], 19)\n\n # Check\n # Set `from` to value of `next_token` for request remaining entries\n # `next_token` does not appear\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=19\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 1)\n self.assertNotIn(\"next_token\", channel.json_body)", "def test_login_session_check(self):\r\n\t\tprint(\"\")\r\n\t\tprint(\"`login_session_check` method 
tests\")\r\n\t\tprint(\"---------------------\")\r\n\t\tprint(\"Test: `login_session_check: logged in`\")\r\n\t\tpath = 'login'\r\n\t\twith requests_mock.mock() as m:\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\r\n\t\t\t\t\t\t[\r\n\t\t\t\t\t\t\t{\"FORCE_PWD_CHANGE\":true,\r\n\t\t\t\t\t\t\t\"LAST_ACCT\":1,\r\n\t\t\t\t\t\t\t\"NEXT_PWNED\":null,\r\n\t\t\t\t\t\t\t\"PWD_EXPIRE\":\"2020-07-30\",\r\n\t\t\t\t\t\t\t\"ROOT\":true,\r\n\t\t\t\t\t\t\t\"USER\":\"restuser\",\r\n\t\t\t\t\t\t\t\"USER_ID\":2,\r\n\t\t\t\t\t\t\t\"expired_pwd\":false\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t],\r\n\t\t\t\t\t\"success\":true\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == True\r\n\t\t\tassert session_check[1]['FORCE_PWD_CHANGE'] == True\r\n\t\t\tassert session_check[1]['LAST_ACCT'] == 1\r\n\t\t\tassert session_check[1]['NEXT_PWNED'] == None\r\n\t\t\tassert session_check[1]['ROOT'] == True\r\n\t\t\tassert session_check[1]['USER_ID'] == 2\r\n\t\t\tassert session_check[1]['USER'] == 'restuser'\r\n\t\t\tassert session_check[1]['expired_pwd'] == False\r\n\t\t\tprint(\"Passed!!!\")\r\n\t\t\tprint(\"Test: `login_session_check: not logged in`\")\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\t[],\r\n\t\t\t\t\t\"success\":false\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == False\r\n\t\t\tassert not session_check[1] # dictionary should be empty\r\n\t\tprint(\"Passed!!!\")", "def checkTimeout(self):\n if TIMEOUT <= (datetime.now() - self.clockCheckStop).total_seconds():\n print('Didn\\'t received messages for 1 minute - Program ends')\n exit(0)", "def test_purge_anonymous_session(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session \"\r\n \"VALUES ('123456', 0, %s)\",\r\n (0,))\r\n cursor.execute(\"INSERT INTO session \"\r\n \"VALUES ('987654', 0, %s)\",\r\n (time.time() - PURGE_AGE - 3600,))\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('987654', 0, 'foo', 'bar')\")\r\n \r\n # We need to modify a different session to trigger the purging\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n req = Mock(authname='anonymous', base_path='/', incookie=incookie,\r\n outcookie=Cookie())\r\n session = Session(self.env, req)\r\n session['foo'] = 'bar'\r\n session.save()\r\n\r\n cursor.execute(\"SELECT COUNT(*) FROM session WHERE sid='987654' AND \"\r\n \"authenticated=0\")\r\n self.assertEqual(0, cursor.fetchone()[0])", "def has_session_expired(session_data, _date=datetime.now()):\n remain = get_session_time_remaining(session_data, _date)\n if remain:\n return not (remain > timedelta())\n return True", "def test_password_reset_frequency_limit(self):\r\n staff_email, _ = self._setup_user(is_staff=True)\r\n\r\n success_msg = 'Your Password Reset is Complete'\r\n\r\n # try to reset password, it should fail\r\n user = User.objects.get(email=staff_email)\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo',\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n success_msg,\r\n 
resp.content\r\n )\r\n\r\n # pretend we're in the future\r\n staff_reset_time = timezone.now() + timedelta(days=1)\r\n with freeze_time(staff_reset_time):\r\n user = User.objects.get(email=staff_email)\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo',\r\n }, follow=True)\r\n\r\n self.assertIn(\r\n success_msg,\r\n resp.content\r\n )", "def test_validate_callback_timeout(self):\n with patch('requests.get') as mock:\n mock.side_effect = requests.exceptions.Timeout\n with self.assertRaises(InvalidProxyCallback):\n ProxyGrantingTicket.objects.validate_callback('http://www.example.com/', 'https://www.example.org/',\n self.pgtid, self.pgtiou)", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def test_new_session(self):\r\n cookie = Cookie()\r\n req = Mock(incookie=Cookie(), outcookie=cookie, authname='anonymous',\r\n base_path='/')\r\n session = Session(self.env, req)\r\n self.assertEqual(session.sid, cookie['trac_session'].value)\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT COUNT(*) FROM session\")\r\n self.assertEqual(0, cursor.fetchone()[0])", "def test_timeout(self):\n now = datetime.datetime.now()\n channel = ChannelStatus.get_channel(channel_spec=self.channel)\n greet = channel.update_user_join_status(self.user, self.greeting, now=now)\n now += datetime.timedelta(seconds=REGREET_TIMEOUT) + datetime.timedelta(seconds=100)\n\n greet = channel.update_user_join_status(self.user, self.greeting, now=now)\n self.assertEqual(True, greet)", "def test_sleep_request(self):\n date = datetime.now() - timedelta(minutes=14)\n RequestAPI.objects.create(total_request=450, date=date)\n start = time.time()\n ManagerRequestApiTwitter().handle_rate_limit()\n stop = time.time()\n total_time = stop - start\n self.assertGreater(total_time, 60)", "def test_login_nth_time(self):\n index = self.client.get(\"/\").data.decode()\n self.assertIn(\"Sign-in\", index, \"We are not logged in\")\n with self.logged_in():\n index = self.client.get(\"/\").data.decode()\n self.assertNotIn(\"Sign-in\", index, \"We are logged in\")\n self.assertIn(\"Hi octocat!\", index, \"We are logged in\")\n\n users = self.Models.User.query.all()\n self.assertEqual(len(users), 3, \"There should be balmas, ponteineptique and octocat\")\n self.client.get(\"/logout\")\n\n with self.logged_in():\n index = self.client.get(\"/\").data.decode()\n self.assertNotIn(\"Sign-in\", index, \"We are logged in\")\n self.assertIn(\"Hi octocat!\", index, \"We are logged in\")\n\n users = self.Models.User.query.all()\n self.assertEqual(len(users), 3, \"There should be balmas, ponteineptique and octocat\")", "def session_required(func): \n def check_session(self, session, *args, **kwargs):\n if auth_manager().is_session_valid(session) or cmp(session, \"SessionForTest\") == 0:\n return func(self, session, *args, **kwargs)\n else:\n return xen_api_error(['SESSION_INVALID', session])\n\n return check_session" ]
[ "0.67007613", "0.6563989", "0.6529781", "0.63517386", "0.6243885", "0.6232972", "0.6106514", "0.6026753", "0.5952208", "0.59390306", "0.5926259", "0.58408374", "0.58186436", "0.5785342", "0.57843", "0.57781994", "0.57300466", "0.572285", "0.57226133", "0.5715555", "0.57084894", "0.56952065", "0.56734866", "0.5662905", "0.56319326", "0.5631651", "0.56138325", "0.5601006", "0.5587145", "0.55706555", "0.5570237", "0.5564146", "0.5554688", "0.5520323", "0.5519942", "0.54977155", "0.5493526", "0.5474511", "0.54144347", "0.5412448", "0.5385804", "0.5379654", "0.53700745", "0.5367586", "0.53611904", "0.53604704", "0.5360356", "0.5342201", "0.5336459", "0.5335449", "0.5334945", "0.5329156", "0.5326399", "0.5324486", "0.5323639", "0.52921844", "0.5283009", "0.5281113", "0.5266738", "0.5227769", "0.5223404", "0.52139276", "0.5211555", "0.52113974", "0.5196559", "0.51963735", "0.5195178", "0.5193537", "0.51929015", "0.51869977", "0.51657003", "0.51634806", "0.51562756", "0.51562756", "0.51465625", "0.5130449", "0.5128823", "0.5125403", "0.51247495", "0.5122419", "0.51198715", "0.5118796", "0.5118796", "0.5117542", "0.5115559", "0.51136994", "0.5104081", "0.5088334", "0.5083437", "0.5078789", "0.5071699", "0.5066165", "0.506614", "0.50626546", "0.5062031", "0.5061964", "0.50617355", "0.5061267", "0.5059986", "0.50561714" ]
0.77491003
0
Mock out HTTP calls to various endpoints using httpretty.
def setUp(self): super().setUp() # Mock the call to the SAP SuccessFactors assertion endpoint SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp' def assertion_callback(_request, _uri, headers): """ Return a fake assertion after checking that the input is what we expect. """ assert b'private_key=fake_private_key_here' in _request.body assert b'user_id=myself' in _request.body assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body return (200, headers, 'fake_saml_assertion') httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback) SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp' def bad_callback(_request, _uri, headers): """ Return a 404 error when someone tries to call the URL. """ return (404, headers, 'NOT AN ASSERTION') httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback) # Mock the call to the SAP SuccessFactors token endpoint SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token' def token_callback(_request, _uri, headers): """ Return a fake assertion after checking that the input is what we expect. """ assert b'assertion=fake_saml_assertion' in _request.body assert b'company_id=NCC1701D' in _request.body assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body return (200, headers, '{"access_token": "faketoken"}') httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback) # Mock the call to the SAP SuccessFactors OData user endpoint ODATA_USER_URL = ( 'http://api.successfactors.com/odata/v2/User(userId=\'myself\')' '?$select=firstName,lastName,defaultFullName,email' ) def user_callback(request, _uri, headers): auth_header = request.headers.get('Authorization') assert auth_header == 'Bearer faketoken' return ( 200, headers, json.dumps({ 'd': { 'username': 'jsmith', 'firstName': 'John', 'lastName': 'Smith', 'defaultFullName': 'John Smith', 'email': '[email protected]', 'country': 'Australia', } }) ) httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def httpretty():\n import httpretty\n httpretty.enable()\n yield httpretty\n httpretty.disable()", "def test_get(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.get(rest_url)", "def test_request(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n\n # Mock good get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.request('get', rest_url)\n assert r.status_code == 200\n assert r.json()['value'] == 'good!'\n \n # Mock bad get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=401,\n json={'value':\"bad!\"})\n with raises(requests.HTTPError):\n r = client.request('get', rest_url)\n r = client.request('get', rest_url, checkstatus=False)\n assert r.status_code == 401\n assert r.json()['value'] == 'bad!'", "def test_api_get(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('GET', url, status=200, body=b'some xml and stuff')\n response = new_job.request('get', url, expected_response=200)\n assert response == b'some xml and stuff'", "def test_services_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_service_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service',\n json=expected_response,\n status=200\n )\n resp = requests.get(\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n expected_url = f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service'\n assert responses.calls[0].request.url == expected_url\n assert \"MY-SERVICE-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def stub_http(hass):\n mock_http_component(hass)", "def api_test(count):\n # Log all API requests\n exception = log_api()\n if exception:\n return jsonify({'error': exception}), HTTPStatus.INTERNAL_SERVER_ERROR\n\n # Per the spec, path segments are used across all requests within a test\n path_segments = generate_path_segments()\n for i in range(0, count):\n # Randomly determine the number of segments in this request\n path_count = random.randrange(1, 7)\n\n # WARNING\n # host.docker.internal is NOT production safe.\n # The production domain should be taken from settings\n # or the environment.\n url = 'http://host.docker.internal:5000/api'\n\n while path_count > 0:\n url += '/{}'.format(path_segments[random.randrange(0, 3)])\n path_count -= 1\n url += '/'\n\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return jsonify({'error': 'request error'}), \\\n HTTPStatus.INTERNAL_SERVER_ERROR\n return '', HTTPStatus.OK", "def test_post(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.POST, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.post(rest_url)", "def test_get(self):\n url, port = self.server.address\n\n #couple of basic 
GETs\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)\n\n # GETs with params\n r = self.client.get(\"http://{0}:{1}/get_with_params\".format(url, port),\n params=self.params)\n self.assertEqual(200, r.status_code)\n self.assertEqual(str(self.params), r.text)\n\n # GETs with ...?", "def test_request_url(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': None, 'limit': None})\n s = r._get_response(0, 0)\n assert_equals(httpretty.last_request().path, '/test?limit=0&offset=0')", "def mocked_webserver_service_api_base(\n app: FastAPI, webserver_service_openapi_specs: dict[str, Any]\n) -> Iterator[MockRouter]:\n settings: ApplicationSettings = app.state.settings\n assert settings.API_SERVER_WEBSERVER\n\n openapi = deepcopy(webserver_service_openapi_specs)\n assert Version(openapi[\"info\"][\"version\"]).major == 0\n\n # pylint: disable=not-context-manager\n with respx.mock(\n base_url=settings.API_SERVER_WEBSERVER.api_base_url,\n assert_all_called=False,\n assert_all_mocked=True,\n ) as respx_mock:\n # WARNING: For this service, DO NOT include /v0 in the `path` to match !!!!\n assert settings.API_SERVER_WEBSERVER.api_base_url.endswith(\"/v0\")\n\n # healthcheck_readiness_probe, healthcheck_liveness_probe\n response_body = {\n \"name\": \"webserver\",\n \"version\": \"1.0.0\",\n \"api_version\": \"1.0.0\",\n }\n\n respx_mock.get(path=\"/\", name=\"healthcheck_readiness_probe\").respond(\n status.HTTP_200_OK, json=response_body\n )\n respx_mock.get(path=\"/health\", name=\"healthcheck_liveness_probe\").respond(\n status.HTTP_200_OK, json=response_body\n )\n\n yield respx_mock", "def mock_all(aioclient_mock):\n aioclient_mock.post(\"http://127.0.0.1/homeassistant/options\", json={\"result\": \"ok\"})\n aioclient_mock.get(\"http://127.0.0.1/supervisor/ping\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/supervisor/options\", json={\"result\": \"ok\"})\n aioclient_mock.get(\n \"http://127.0.0.1/homeassistant/info\",\n json={\"result\": \"ok\", \"data\": {\"last_version\": \"10.0\"}},\n )", "def test_mocked_get_list_template(self):\n c = Client()\n response = c.get(reverse('mocked'))\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Here is the list of all possible apis:\",\n response.content)\n self.assertIn(\"^mocked_post$\", response.content)", "def test_all_http_stats(self):\n client = Client()\n response = client.get(reverse('home'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('browse_produce'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('browse_locations'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('search'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('faq'))\n self.assertEqual(200, response.status_code)", "def mock_all(aioclient_mock, request):\n aioclient_mock.post(\"http://127.0.0.1/openpeerpower/options\", json={\"result\": \"ok\"})\n aioclient_mock.get(\"http://127.0.0.1/supervisor/ping\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/supervisor/options\", 
json={\"result\": \"ok\"})\n aioclient_mock.get(\n \"http://127.0.0.1/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\"supervisor\": \"222\", \"openpeerpower\": \"0.110.0\", \"oppos\": None},\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/store\",\n json={\n \"result\": \"ok\",\n \"data\": {\"addons\": [], \"repositories\": []},\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/host/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\n \"result\": \"ok\",\n \"data\": {\n \"coppis\": \"vm\",\n \"operating_system\": \"Debian GNU/Linux 10 (buster)\",\n \"kernel\": \"4.19.0-6-amd64\",\n },\n },\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/core/info\",\n json={\"result\": \"ok\", \"data\": {\"version_latest\": \"1.0.0\"}},\n )\n aioclient_mock.get(\n \"http://127.0.0.1/os/info\",\n json={\"result\": \"ok\", \"data\": {\"version_latest\": \"1.0.0\"}},\n )\n aioclient_mock.get(\n \"http://127.0.0.1/supervisor/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\"version_latest\": \"1.0.0\"},\n \"addons\": [\n {\n \"name\": \"test\",\n \"slug\": \"test\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"repository\": \"core\",\n \"url\": \"https://github.com/openpeerpower/addons/test\",\n },\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"repository\": \"core\",\n \"url\": \"https://github.com\",\n },\n ],\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/ingress/panels\", json={\"result\": \"ok\", \"data\": {\"panels\": {}}}\n )", "def test_mocked_get_list_template(self):\n c = Client()\n response = c.get(reverse('mocked'))\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Here is the list of all possible apis:\",\n response.content)\n self.assertIn(\"^mocked_get$\", response.content)", "def register_uri_for_http(self):\n # pull info on all locations and cas via ca_stats\n httpretty.register_uri(\n httpretty.GET,\n 'http://ca_stats_fake_url.com',\n body=get_json_data())\n # check each ca\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan033.dce.harvard.edu/admin/channel3/get_params.cgi',\n responses=[\n httpretty.Response(body='publish_type = 0'),\n httpretty.Response(body='publish_type = 6')]\n )\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan033.dce.harvard.edu/admin/channel4/get_params.cgi',\n body='publish_type = 6')\n # fix live/lowBR divergent live stream status:\n # live has precedence, so it sets lowBR channel publish_type to 0\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan033.dce.harvard.edu/admin/channel4/set_params.cgi',\n body='', status=201)\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan017.dce.harvard.edu/admin/channel3/get_params.cgi',\n body='publish_type = 6')\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan017.dce.harvard.edu/admin/channel4/get_params.cgi',\n body='publish_type = 6')\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan089.dce.harvard.edu/admin/channel3/get_params.cgi',\n body='publish_type = 0')\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan089.dce.harvard.edu/admin/channel4/get_params.cgi',\n body='publish_type = 0')\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan088.dce.harvard.edu/admin/channel3/get_params.cgi',\n body='publish_type = 0')\n httpretty.register_uri(\n httpretty.GET,\n 
'http://fake-epiphan088.dce.harvard.edu/admin/channel4/get_params.cgi',\n body='publish_type = 0')\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan017.dce.harvard.edu/admin/channel3/set_params.cgi',\n body='', status=201)\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan017.dce.harvard.edu/admin/channel4/set_params.cgi',\n body='', status=201)\n httpretty.register_uri(\n httpretty.GET,\n 'http://fake-epiphan033.dce.harvard.edu/admin/channel3/set_params.cgi',\n body='', status=201)", "def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n \"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )", "def test_timetracking_resource_methods(self, mock_url, resource_name, single_name):\n business_id = 1234\n resource_id = 2345\n resource_ = getattr(self.freshBooksClient, resource_name)\n\n list_response = {resource_name: [], \"meta\": {\"page\": 1, \"pages\": 0, \"per_page\": 15, \"total\": 0}}\n single_response = {single_name: {}}\n\n with patch.object(TimetrackingResource, \"_request\", return_value=list_response) as mock_request:\n resource_.list(business_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with patch.object(TimetrackingResource, \"_request\", return_value=single_response) as mock_request:\n resource_.get(business_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n resource_.create(business_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.POST, data={single_name: {}})\n\n resource_.update(business_id, resource_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.PUT, data={single_name: {}})\n\n resource_.delete(business_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.DELETE)", "def test_orchestrator_http_simple(self):\n pass", "def test_mocked_get_api(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/154/\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response.content)\n response2 = c.get(\"/apimock/mocked/api/account/187/\")\n self.assertEqual(response2.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response2.content)", "def test_get_with_parameters(self):\n self._register_uri(httpretty.GET)\n response = self.client.get(self.test_endpoint,\n foo=\"bar\", spam=\"eggs\")\n self.assertIn(\"OAuth\", self._last_request().headers[\"authorization\"])\n self.assertEqual(self._last_request().querystring[\"foo\"], [\"bar\"])\n self.assertEqual(self._last_request().querystring[\"spam\"], [\"eggs\"])\n self.assertEqual(response, self.test_data)\n self.assertEqual(self.client.last_url, self.test_uri)\n self.assertEqual(self.client.last_params, {\"foo\": b\"bar\",\n \"spam\": b\"eggs\"})\n self.assertEqual(self.client.last_response.json(), self.test_data)", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_get_main_route():\n response = client.get(url)\n assert response.status_code == 200", "def test_smoke_test(self):\n urls = [ ]\n urls.append('/')\n 
urls.append(reverse('api_doc'))\n urls.append(reverse('laws'))\n urls.append(reverse('issue_list_user', args=['test0']))\n\n for url in urls:\n response = self.client.get(url)\n self.assertEqual(response.status_code , 200)", "def test_patch(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.PATCH, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.patch(rest_url)", "def test_project_resource_methods(self, mock_url, resource_name, single_name):\n business_id = 1234\n resource_id = 2345\n resource_ = getattr(self.freshBooksClient, resource_name)\n\n list_response = {resource_name: [], \"meta\": {\"page\": 1, \"pages\": 0, \"per_page\": 15, \"total\": 0}}\n single_response = {single_name: {}}\n\n with patch.object(ProjectsResource, \"_request\", return_value=list_response) as mock_request:\n resource_.list(business_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with patch.object(ProjectsResource, \"_request\", return_value=single_response) as mock_request:\n resource_.get(business_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n resource_.create(business_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.POST, data={single_name: {}})\n\n resource_.update(business_id, resource_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.PUT, data={single_name: {}})\n\n resource_.delete(business_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.DELETE)", "def _test_good(self,\n the_request_method,\n the_request_uri,\n the_request_headers,\n the_request_body,\n the_response_code,\n the_response_headers,\n the_response_body,\n the_response_content_type):\n\n the_response_is_ok = True\n the_request_principal = \"[email protected]\"\n\n def async_app_service_forwarder_forward_patch(http_client, request, callback):\n self.assertIsNotNone(request)\n\n expected_url = \"http://%s%s\" % (\n self.__class__._app_service,\n the_request_uri\n )\n self.assertEqual(request.url, expected_url)\n\n self.assertIsNotNone(request.method)\n self.assertEqual(request.method, the_request_method)\n\n self.assertIsNotNone(request.headers)\n self.assertEqual(len(request.headers), 1 + len(the_request_headers))\n expected_headers = tornado.httputil.HTTPHeaders(the_request_headers)\n expected_headers[\"Authorization\"] = \"%s %s\" % (\n self.__class__._app_service_auth_method,\n the_request_principal)\n self.assertEqual(request.headers, expected_headers)\n\n response = mock.Mock()\n response.error = None\n response.code = the_response_code\n response.body = the_response_body\n response.headers = tornado.httputil.HTTPHeaders(the_response_headers)\n if response.body:\n response.headers[\"Content-type\"] = the_response_content_type\n response.headers[\"Content-length\"] = str(len(response.body))\n response.request_time = 24\n callback(response)\n\n def on_async_app_service_forward_done(is_ok,\n http_status_code,\n headers,\n body):\n\n self.assertIsNotNone(is_ok)\n self.assertEqual(is_ok, the_response_is_ok)\n\n if not is_ok:\n return\n\n self.assertIsNotNone(http_status_code)\n self.assertEqual(http_status_code, the_response_code)\n\n self.assertIsNotNone(headers)\n\n if the_response_body is None:\n self.assertIsNone(body)\n\n self.assertEqual(headers, the_response_headers)\n else:\n self.assertIsNotNone(body)\n self.assertEqual(body, the_response_body)\n\n 
self.assertEqual(len(headers), 2 + len(the_response_headers))\n the_expected_headers = tornado.httputil.HTTPHeaders(the_response_headers)\n the_expected_headers[\"Content-type\"] = the_response_content_type\n the_expected_headers[\"Content-length\"] = str(len(body))\n self.assertEqual(headers, the_expected_headers)\n\n name_of_method_to_patch = \"tornado.httpclient.AsyncHTTPClient.fetch\"\n with mock.patch(name_of_method_to_patch, async_app_service_forwarder_forward_patch):\n aasf = async_app_service_forwarder.AsyncAppServiceForwarder(\n the_request_method,\n the_request_uri,\n the_request_headers,\n the_request_body,\n the_request_principal)\n aasf.forward(on_async_app_service_forward_done)", "def test_list_endpoints(self):\n routes = [\n '/',\n '/npm/<name>',\n '/nuget/<name>',\n '/ping',\n '/ping/npm',\n '/ping/nuget',\n '/ping/pypi',\n '/ping/rubygems',\n '/pypi/<name>',\n '/rubygems/<name>',\n ]\n expected = {}\n for num, route in enumerate(routes):\n expected[str(num)] = route\n\n response = self.app.get('/')\n assert json.loads(response.data) == expected", "def test_make_request_method(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL, \r\n mut.METHOD_KEY: SAMPLE_METHOD})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.post.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.post.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def test_simpleapp():\n class Handler(RequestHandler):\n def get(self):\n self.write('Hello')\n\n app = Application([url('/hello', Handler)])\n\n with Tester(app) as tester:\n response = yield tester.http_client.fetch(tester.url_for('/hello'))\n assert 'Hello' == text_body(response)", "def test_GET_call_api_and_return_200Ok(client):\n\n url = '/api/v1/calls/'\n\n response = client.get(url)\n\n assert response.status_code == status.HTTP_200_OK", "def mocked_requests_get(*args, **kwargs):\n response = {'message': '',\n 'data': {\n 'cti_token': 'secret-cti-token',\n 'passivetotal_token': 'secret-passivetotal-token',\n 'passivetotal_user': '[email protected]',\n 'shodan_token': 'secret-shodan-token'\n }\n }\n return MockResponse(json.dumps(response), 200, HEADERS)", "def test_magpie_prefix_request_with_multiple_route_url(self):\n base_url = \"http://localhost\"\n\n def mock_get_multiformat_post(*args, **kwargs):\n return get_post_item(*args, p=paths.pop(0), **kwargs)\n\n def get_post_item(request, name, default=None, p=None):\n from magpie.api.requests import get_multiformat_post as real_get_multiformat_post\n utils.check_val_equal(request.url, base_url + p,\n \"Proxied path should have been auto-resolved [URL: {}].\".format(url))\n return real_get_multiformat_post(request, name, default=default)\n\n for url in [\"http://localhost\", \"http://localhost/magpie\"]:\n paths = [\"/signin\", \"/signin_internal\"] # updated on each *direct* 'get_multiformat_post' call in 'login'\n app = utils.get_test_magpie_app({\"magpie.url\": url})\n\n with mock.patch(\"magpie.api.login.login.get_multiformat_post\", side_effect=mock_get_multiformat_post):\n data = {\"user_name\": \"foo\", \"password\": \"bar\"}\n headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON}\n resp = utils.test_request(app, \"POST\", paths[0], json=data, headers=headers, expect_errors=True)\n if LooseVersion(self.version) < LooseVersion(\"0.10.0\"):\n # user name doesn't exist\n utils.check_response_basic_info(resp, expected_code=406, 
expected_method=\"POST\")\n else:\n # invalid username/password credentials\n utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\")", "def test_api_post(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('POST', url, status=201, body=b'some xml and stuff')\n response = new_job.request('post', url, data=b'stuff')\n assert response == b'some xml and stuff'\n assert httpretty.last_request().body == b'stuff'", "def test_make_request(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.get.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.get.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def testApi(self):", "def post(*args, **kwargs):\n endpoint = MockRequests.parse_arguments(*args, **kwargs)\n path = ''\n text = '{}'\n if not endpoint:\n path = os.path.join(TEST_DATA_DIR, 'account.json')\n elif endpoint == 'lists':\n path = os.path.join(\n TEST_DATA_DIR, 'lists.json')\n elif re.compile('lists/.*/interest-categories/.*/interests').match(\n endpoint):\n path = os.path.join(\n TEST_DATA_DIR, 'interests.json')\n elif re.compile('lists/.*/interest-categories').match(endpoint):\n path = os.path.join(\n TEST_DATA_DIR, 'lists_interest_categories.json')\n else:\n print('WARNING, unhandled endpoint in test: {0}'.format(endpoint))\n if path:\n with open(path) as datafile:\n text = datafile.read()\n # Return mock response with text.\n return Mock(text=text)", "def setUp(self):\n self.response = self.s.get(self.url, params=self.params)", "def test_accounting_gateways_resource_methods(self, mock_url):\n account_id = 1234\n resource_id = 2345\n\n list_response = {\"gateways\": [], \"page\": 1, \"pages\": 0, \"per_page\": 15, \"total\": 0}\n single_response = {}\n with patch.object(AccountingResource, \"_request\", return_value=list_response) as mock_request:\n self.freshBooksClient.gateways.list(account_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with patch.object(AccountingResource, \"_request\", return_value=single_response) as mock_request:\n self.freshBooksClient.gateways.delete(account_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.DELETE)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.gateways.get(account_id, resource_id)\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.gateways.create(account_id, {})\n\n with pytest.raises(FreshBooksNotImplementedError):\n self.freshBooksClient.gateways.update(account_id, resource_id, {})", "def test_module(client, _args):\n uri = '/'\n client._http_request(method='GET', url_suffix=uri)\n return 'ok', None, None", "def test_get_method(self):\n self.getPage('/')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')\n self.assertBody('{\"mystring\": \"\"}')", "def test_api_helloworld():\n with app.test_client() as c:\n response = c.get('/')\n assert response.status_code == 200", "def mocked_requests_scrapping_get(*args, **kwargs):\n class MockResponse:\n def __init__(self, json_data, status_code, url):\n self.content = json_data\n self.status_code = status_code\n self.url = url\n self.cookies = {\"JSESSIONID\": \"jkghhjgjhgfjgfgjg\"}\n self.encoding = \"utf-8\"\n\n def json(self):\n return self.json_data\n\n dn = 
os.path.dirname(os.path.realpath(__file__))\n for url, provider in {f\"{settings.BASE_URL}/eAnnuaire/formulaire?appelRetour=true\": \"form\",\n f\"{settings.BASE_URL}/eAnnuaire/resultat\": \"suivant\",\n f\"{settings.BASE_URL}/eAnnuaire/fiche\": \"detail\"}.items():\n if args[0].startswith(url):\n with open(os.path.join(dn, \"fixtures\", f\"{provider}.html\"), \"rb\") as fp:\n return MockResponse(fp.read(), 200, args[0])", "def test_service_api_get(service_app):\n response = service_app.get('/')\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert json.loads(response.data) == {'description': 'service is up', 'status': 200}", "def test_post(self):\n url, port = self.server.address\n\n #couple of basic POSTs\n #request parameters\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)", "def mock_api():\n with open(os.path.join(HERE, 'response.json'), 'r') as fp:\n webargs_response = fp.read()\n # A valid package with a proper response\n responses.add(\n responses.GET,\n 'https://pypi.python.org/pypi/webargs/json',\n body=webargs_response,\n content_type='application/json'\n )\n # A valid package with no releases\n with open(os.path.join(HERE, 'response_noreleases.json'), 'r') as fp:\n foo_response = fp.read()\n\n responses.add(\n responses.GET,\n 'https://pypi.python.org/pypi/foo/json',\n body=foo_response,\n content_type='application/json'\n )\n\n # An invalid package name\n responses.add(\n responses.GET,\n 'https://pypi.python.org/pypi/nope/json',\n status=404\n )\n responses.start()\n\n yield responses\n\n responses.stop()", "def test_get_api_resources(self):\n pass", "def test(request, backend_usages, application, client, setup, expect_ok, expect_not_found):\n\n request.getfixturevalue(setup)\n\n assert len(backend_usages) == 2\n\n analytics = application.threescale_client.analytics\n\n for path in expect_ok:\n hits_before = hits(application, analytics)\n\n response = client.get(path)\n assert response.status_code == 200, f\"For path {path} expected status_code 200\"\n\n hits_after = resilient.stats_service_usage(\n application.threescale_client, application[\"service_id\"], \"hits\", \"total\", hits_before+1)\n\n assert hits_before + 1 == hits_after, f\"For path {path} expected hits to be increased by 1\"\n\n for path in expect_not_found:\n hits_before = hits(application, analytics)\n\n response = client.get(path)\n assert response.status_code == 404, f\"For path {path} expected status_code 400\"\n\n hits_after = hits(application, analytics)\n assert hits_before == hits_after, f\"For path {path} expected hits to be same before and after\"", "def test_get_with_real_data(self):\n self.getPage('/blah', method='PUT')\n self.getPage('/')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')\n self.assertBody('{\"mystring\": \"blah\"}')", "def test_simple1(self):\n api = self.load_api_description('simple1.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 1)\n\n resource = api.resources[0]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 1)\n\n 
operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(string)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')", "def test_apis_wo_auth(self):\n\n # Order list API\n url = reverse('orders-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Order summary API\n url = reverse('order-summary-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Order create API\n url = reverse('orders-list')\n response = self.client.post(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Shares list/summary API\n url = reverse('shares-list', args=['summary'])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n url = reverse('shares-list', args=['all'])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_reuse():\n class Handler(RequestHandler):\n def get(self):\n self.write('Hello')\n\n app = Application([url('/hello', Handler)])\n\n tester = Tester(app)\n with tester:\n response = yield tester.http_client.fetch(tester.url_for('/hello'))\n assert 'Hello' == text_body(response)\n\n with pytest.raises(RuntimeError):\n tester.setup()", "def test_health(self):\n self.assert_request('get', '/_health')", "def test_response_route():\n sample_response = {\n \"items\": [\n { \"id\": 1, \"name\": 'apples', \"price\": \"$2\" },\n { \"id\": 2, \"name\": \"Peaches\", \"price\": \"$5\" }\n ]\n }\n response = make_response(jsonify(sample_response))\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Content-Type'] = 'application/json'\n return response", "def test_request():\n response = requests.get('http://jsonplaceholder.typicode.com/todos')\n assert response.ok", "def test_officer_access(self):\n self.client.login(self.officer.email)\n for url in self.urls_get:\n response = self.client.get(url, follow=False)\n self.assertEqual(200, response.status_code)\n for url in self.urls_post:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEquals(200, response.status_code)", "def test_ticketauth_canonical(requests_mock):\n app = PublMock()\n app.add_url_rule('/_tokens', 'tokens', tokens.indieauth_endpoint,\n methods=['GET', 'POST'])\n\n stash = {}\n\n def ticket_endpoint(request, _):\n import urllib.parse\n args = urllib.parse.parse_qs(request.text)\n assert 'subject' in args\n assert 'ticket' in args\n assert 'resource' in args\n stash['ticket'] = args['ticket']\n\n with app.test_client() as client:\n req = client.post(token_endpoint, data={\n 'grant_type': 'ticket',\n 'ticket': args['ticket']\n })\n token = json.loads(req.data)\n assert 'access_token' in token\n assert token['token_type'].lower() == 'bearer'\n stash['response'] = token\n\n with app.test_request_context('/'):\n token_endpoint = flask.url_for('tokens')\n\n for scheme in ('http', 'https'):\n requests_mock.get(f'{scheme}://canonical.ticketauth', text='''\n <link rel=\"ticket_endpoint\" href=\"https://foo.example/tickets\">\n <link rel=\"canonical\" href=\"https://canonical.ticketAuth\">\n <p 
class=\"h-card\"><span class=\"p-name\">pachelbel</span></p>\n ''')\n requests_mock.post('https://foo.example/tickets', text=ticket_endpoint)\n\n def test_url(identity, match):\n with app.test_request_context('/bogus'):\n request_url = flask.url_for('tokens')\n with app.test_client() as client:\n req = client.post(request_url, data={'action': 'ticket',\n 'subject': identity})\n LOGGER.info(\"Got ticket redemption response %d: %s\",\n req.status_code, req.data)\n assert req.status_code == 202\n assert req.data == b'Ticket sent'\n\n assert stash['response']['token_type'].lower() == 'bearer'\n assert stash['response']['me'] == match\n token = tokens.parse_token(stash['response']['access_token'])\n assert token['me'] == match\n\n req = client.get(token_endpoint, headers={\n 'Authorization': f'Bearer {stash[\"response\"][\"access_token\"]}'\n })\n assert req.status_code == 200\n assert req.headers['Content-Type'] == 'application/json'\n verified = json.loads(req.data)\n assert verified['me'] == match\n\n token_user = user.User(verified['me'])\n assert token_user.profile['name'] == 'pachelbel'\n\n for url in ('http://canonical.ticketauth', 'https://canonical.ticketauth',\n 'http://Canonical.TicketAuth'):\n test_url(url, 'https://canonical.ticketauth/')", "def setUp(self):\n self.response = self.client.get('/')", "def test_accounting_resource_methods(self, mock_url, resource_name, single_name, delete_via_update):\n account_id = 1234\n resource_id = 2345\n resource_ = getattr(self.freshBooksClient, resource_name)\n\n list_response = {resource_name: [], \"page\": 1, \"pages\": 0, \"per_page\": 15, \"total\": 0}\n single_response = {single_name: {}}\n\n with patch.object(AccountingResource, \"_request\", return_value=list_response) as mock_request:\n resource_.list(account_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with patch.object(AccountingResource, \"_request\", return_value=single_response) as mock_request:\n resource_.get(account_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n resource_.create(account_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.POST, data={single_name: {}})\n\n resource_.update(account_id, resource_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.PUT, data={single_name: {}})\n\n resource_.delete(account_id, resource_id)\n if delete_via_update:\n mock_request.assert_called_with(\"some_url\", HttpVerbs.PUT, data={single_name: {\"vis_state\": 1}})\n else:\n mock_request.assert_called_with(\"some_url\", HttpVerbs.DELETE)", "def test_get_post_endpoints(self):\n self.addcontribgetrequest = self.factory.get(\n reverse(\"add_album_contrib\", kwargs={\"albumid\": self.testalbum.id}))\n self.addcontribgetrequest.user = self.u\n self.updateaccessgetrequest = self.factory.get(\n reverse(\"update_album_access\", kwargs={\"id\": self.testalbum.id}))\n self.updateaccessgetrequest.user = self.u\n self.addgroupgetrequest = self.factory.get(reverse(\"add_album_groups\", kwargs={\"albumid\":self.testalbum.id}))\n self.addgroupgetrequest.user = self.u\n\n self.assertRaises(Http404, album.add_contrib, self.addcontribgetrequest, self.testalbum.id)\n self.assertRaises(Http404, album.update_access_type, self.updateaccessgetrequest, self.testalbum.id)\n self.assertRaises(Http404, album.add_groups, self.addgroupgetrequest, self.testalbum.id)\n # todo: maybe make this a loop", "def basicRequest(self):\n endpoint = \"/foo\"\n\n def verify(request):\n o(request.method).equals(\"GET\")(\"Checking basic 
request method.\")\n o(request.url).equals(endpoint)(\"Checking basic request url.\")\n request.respond(200)\n self.testServer.respondWith(verify)\n\n server.request(endpoint)\n self.testServer.respond()", "def test_run_success():\n httpretty.register_uri(httpretty.GET, URL, body='<html></html>', status=200)\n with mock.patch('httsleep.main.sleep') as mock_sleep:\n httsleep = HttSleep(URL, {'status_code': 200})\n resp = httsleep.run()\n assert resp.status_code == 200\n assert not mock_sleep.called", "def test_health_endpoint(client):\n\n result = client.get('/health')\n\n assert result.status_code == 200\n assert result.json == {'status': 'Ok'}", "async def test_api_ping(hassio_handler, aioclient_mock: AiohttpClientMocker) -> None:\n aioclient_mock.get(\"http://127.0.0.1/supervisor/ping\", json={\"result\": \"ok\"})\n\n assert await hassio_handler.is_connected()\n assert aioclient_mock.call_count == 1", "def mocked_responses():\n with responses.RequestsMock() as rsps:\n yield rsps", "def test_ping(self):\n response = self.app.get('/ping')\n\n assert response.status_code == 200\n assert response.data == b\"pong\"", "def test_basic_fetch(client):\n\n res = client.get('/api/reminders')\n assert res.status_code == 200\n assert res.content_type == 'application/json'", "def test_06_test_via_endpoint(self):\n\n # set up all the bits we need\n dataset = []\n for i in range(10):\n data = ArticleFixtureFactory.make_incoming_api_article(doi=\"10.123/test/\" + str(i),\n fulltext=\"http://example.com/\" + str(i))\n dataset.append(data)\n\n # create the main account we're going to work as\n article_owner = models.Account()\n article_owner.set_id(\"test\")\n article_owner.set_name(\"Tester\")\n article_owner.set_email(\"[email protected]\")\n article_owner.generate_api_key()\n article_owner.add_role('publisher')\n article_owner.add_role('api')\n article_owner.save(blocking=True)\n\n # Add another user who doesn't own these articles\n somebody_else = models.Account()\n somebody_else.set_id(\"somebody_else\")\n somebody_else.set_name(\"Somebody Else\")\n somebody_else.set_email(\"[email protected]\")\n somebody_else.generate_api_key()\n somebody_else.add_role('publisher')\n somebody_else.add_role('api')\n somebody_else.save(blocking=True)\n\n assert article_owner.api_key != somebody_else.api_key\n\n # add a journal to the article owner account to create that link between account and articles\n journal = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))\n journal.set_owner(article_owner.id)\n journal.save(blocking=True)\n\n with self.app_test.test_request_context():\n with self.app_test.test_client() as t_client:\n\n # Bulk create\n # The wrong owner can't create articles\n resp = t_client.post(url_for('api_v3.bulk_article_create', api_key=somebody_else.api_key),\n data=json.dumps(dataset))\n assert resp.status_code == 400, resp.status_code\n\n # Bulk create\n # redirected from v1\n # resp = t_client.post(url_for('api_v1.bulk_article_create', api_key=somebody_else.api_key),\n # data=json.dumps(dataset))\n # assert resp.status_code == 301, resp.status_code\n\n # But the correct owner can create articles\n resp = t_client.post(url_for('api_v3.bulk_article_create', api_key=article_owner.api_key),\n data=json.dumps(dataset))\n assert resp.status_code == 201\n reply = json.loads(resp.data.decode(\"utf-8\"))\n assert len(reply) == len(dataset)\n first_art = reply.pop()\n assert first_art['status'] == 'created'\n # Check we actually created new records\n time.sleep(1)\n assert 
len(models.Article.all()) == len(dataset)\n\n # Bulk delete\n all_but_one = [new_art['id'] for new_art in reply]\n resp = t_client.delete(url_for('api_v3.bulk_article_delete', api_key=article_owner.api_key),\n data=json.dumps(all_but_one))\n assert resp.status_code == 204\n time.sleep(1)\n # we should have deleted all but one of the articles.\n assert len(models.Article.all()) == 1\n # And our other user isn't allowed to delete the remaining one.\n resp = t_client.delete(url_for('api_v3.bulk_article_delete', api_key=somebody_else.api_key),\n data=json.dumps([first_art['id']]))\n assert resp.status_code == 400", "def test_projects_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_project_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project',\n json=expected_response,\n status=200\n )\n resp = requests.get(f'{os.environ[\"AIVEN_API_URL\"]}/v1/project')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == f'{os.environ[\"AIVEN_API_URL\"]}/v1/project'\n assert \"MY-PROJECT-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "async def test_get_with_params_in_url(app, aiohttp_server):\n server = await aiohttp_server(app)\n url = \"http://localhost:%d?foo=bar\" % server.port\n\n async with aiosonic.HTTPClient() as client:\n res = await client.get(url)\n assert res.status_code == 200\n assert await res.text() == \"bar\"\n await server.close()", "def setUp(self):\n APP.config.from_object(CONFIGS['testing_config'])\n self.api = APP\n self.api_context = self.api.app_context()\n self.api_context.push()\n self.api_test_client = APP.test_client()\n\n # Base url common to all endpoints\n self.BASE_URL = '/api/v1'\n # Sample data for POST requests\n self.ORDER = {\n 'item_name': 'Big Samosa',\n 'item_price': 200,\n 'quantity': 1\n }\n\n self.ORDER_2 = {\n 'item_name': 'Pork Ribs',\n 'item_price': 1080,\n 'quantity': 1\n }", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def test_getting_todos(mock_get):\n mock_get.return_value.ok = True\n\n # Call the service, which will send a request to the server.\n response = get_todos()\n\n # If the request is sent successfully, expect a response to be returned.\n assert response is not None", 
"def test_manager_reports_total_duration():\n\n httpretty.register_uri(\n httpretty.GET,\n \"http://test.com/test_1\",\n body=httpretty_body_that_waits_and_returns(0.1, [200, {}, \"Hello!\"]),\n )\n httpretty.register_uri(\n httpretty.GET,\n \"http://test.com/test_2\",\n body=httpretty_body_that_waits_and_returns(0.4, [200, {}, \"World!\"]),\n )\n\n report_path = os.path.join(REPORT_DIR, \"duration_report_with_delay_and_retry.json\")\n spintest(\n [\"http://test.com\"],\n [{\"method\": \"GET\", \"route\": \"/test_1\"}, {\"method\": \"GET\", \"route\": \"/test_2\"}],\n generate_report=report_path,\n )\n spintest_reports = read_report(report_path)\n first_duration = spintest_reports[0][\"reports\"][0][\"duration_sec\"]\n second_duration = spintest_reports[0][\"reports\"][1][\"duration_sec\"]\n\n total_duration_calcuate = first_duration + second_duration\n total_duration = spintest_reports[0][\"total_duration_sec\"]\n assert total_duration == total_duration_calcuate\n assert total_duration >= 0.5", "def _mock_requests_get(text):\n return lambda *args, **kwargs: PrometheusClientTest.FakeResponse(\n args[0], text, 200,\n )", "def test_easily_reusable(self):\n result = get_api_url()\n\n self.assertEqual(result, 'https://FQDN/afp-api/latest')\n self.mock_sanitize_host.assert_called_once_with('afp')", "def test_requests():\n app = webtest.TestApp(WebService(TestFactory()))\n # Check invalid request (bad values)\n response = app.get(\"/?id=bad\", expect_errors=True)\n assert_equal(response.status_int, 400)\n assert_equal(response.status, \"400 Bad Request\")\n assert_equal(response.content_type, \"text/plain\")\n # Check invalid request (duplicates)\n response = app.get(\"/?id=BOU&id=BOU\", expect_errors=True)\n assert_equal(response.status_int, 400)\n assert_equal(response.status, \"400 Bad Request\")\n assert_equal(response.content_type, \"text/plain\")\n # Check valid request (upper and lower case)\n response = app.get(\"/?id=BOU\")\n assert_equal(response.status_int, 200)\n assert_equal(response.status, \"200 OK\")\n assert_equal(response.content_type, \"text/plain\")\n # Test internal server error (use fake factory)\n app = webtest.TestApp(WebService(ErrorFactory(), error_stream=None))\n response = app.get(\"/?id=BOU\", expect_errors=True)\n assert_equal(response.status_int, 500)\n assert_equal(response.status, \"500 Internal Server Error\")\n assert_equal(response.content_type, \"text/plain\")", "def test_api(test_name, endpoint, method, body, expected_response, expected_status_code, validation, params):\n response = None\n with allure.step(' '.join(['getting API response on endpoint:', str(endpoint)])):\n response = APIRequestor().request(method=method, url_path=endpoint, body=body, params=params)\n with allure.step(' '.join(['Asserting API status code expected:', str(expected_status_code), ', with response:', str(response.status_code)])):\n Compare.equal.__call__(a=expected_status_code, b=response.status_code, free_text=f\"Status code is not as expected: {response.status_code} instead of expected: {expected_status_code}\")\n with allure.step('starting API validation'):\n validation = 'equal' if not validation else validation\n with allure.step(' '.join(['Validation with method:', str(validation)])):\n Compare.__dict__[validation](a=str(response), b=str(expected_response),\n free_text=f\"Failed to compare, Response is not as expected: {response} instead of {expected_response}\")", "def test_get_without_oauth(self):\n self.client = trovebox.Trovebox(host=self.test_host)\n 
self._register_uri(httpretty.GET)\n response = self.client.get(self.test_endpoint)\n self.assertNotIn(\"authorization\", self._last_request().headers)\n self.assertEqual(response, self.test_data)", "def test_api_use_method_post(self):\n body = Body()\n response = self.client.open(\n '/api/use/{method}/'.format(method='method_example'),\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_run_retries():\n responses = [httpretty.Response(body=\"Internal Server Error\", status=500),\n httpretty.Response(body=\"Internal Server Error\", status=500),\n httpretty.Response(body=\"<html></html>\", status=200)]\n httpretty.register_uri(httpretty.GET, URL, responses=responses)\n with mock.patch('httsleep.main.sleep') as mock_sleep:\n resp = HttSleep(URL, {'status_code': 200}).run()\n assert mock_sleep.called\n assert mock_sleep.call_count == 2\n assert resp.status_code == 200\n assert resp.text == '<html></html>'", "def requestsmock():\n with requests_mock.mock() as m:\n yield m", "def test_request():\n return make_response(\"ok\")", "def test_head(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.HEAD, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.head(rest_url)", "def test_all_endpoint_status():\n r = client.get('/openapi.json')\n assert r.status_code == 200\n for e in r.json()['paths'].keys():\n r = client.get(e)\n assert r.status_code == 200\n\n for e in ['plot']:\n r = client.get(e)\n assert r.status_code == 200", "def setUp(self):\r\n self.client = Client()\r\n self.ping_url = reverse('status.service.celery.ping')", "def test_get(self):\n response = self.client.get('/weather/', format=\"json\")\n self.assertEqual(response.status_code, 200)", "def test_simple2(self):\n api = self.load_api_description('simple2.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 1)\n\n resource = api.resources[0]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(input.type.type.get_reference_name(), 'OrderRequest')\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 2)\n self.assertEqual(len(api.data_types[0].fields), 5)\n self.assertEqual(len(api.data_types[1].fields), 2)\n self.assertFalse(api.data_types[1].fields[0].optional)\n self.assertTrue(api.data_types[1].fields[1].optional)", "def test_comment_resource_methods(self, mock_url, resource_name, single_name):\n business_id = 1234\n 
resource_id = 2345\n resource_ = getattr(self.freshBooksClient, resource_name)\n\n list_response = {resource_name: [], \"meta\": {\"page\": 1, \"pages\": 0, \"per_page\": 15, \"total\": 0}}\n single_response = {single_name: {}}\n\n with patch.object(CommentsResource, \"_request\", return_value=list_response) as mock_request:\n resource_.list(business_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with patch.object(CommentsResource, \"_request\", return_value=single_response) as mock_request:\n resource_.get(business_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n resource_.create(business_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.POST, data={single_name: {}})\n\n resource_.update(business_id, resource_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.PUT, data={single_name: {}})\n\n resource_.delete(business_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.DELETE)", "async def test_request(client, monkeypatch, caplog):\n mock_refresh_token_called = 0\n\n async def mock_refresh_token():\n nonlocal mock_refresh_token_called\n mock_refresh_token_called += 1\n\n monkeypatch.setattr(\n client._auth_client, 'refresh_token', mock_refresh_token)\n\n async def mock_valid_token_set():\n return False\n\n monkeypatch.setattr(client, 'valid_token_set', mock_valid_token_set)\n\n resp_text = 'ohai'\n\n with aioresponses() as mocked:\n mocked.get(conftest.API_URL, status=200, body=resp_text)\n resp = await client.request('get', conftest.API_URL)\n\n assert resp == resp_text\n\n assert 1 == mock_refresh_token_called\n assert 1 == len(mocked.requests)\n request = mocked.requests.popitem()[1][0]\n authorization_header = request.kwargs['headers']['Authorization']\n assert authorization_header == f'Bearer {client._auth_client.token}'\n assert 2 == len(caplog.records)", "def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n ...\n # test that you're getting a template", "def test_getting_todos_again():\n with patch('project.services.requests.get') as mock_get:\n # Configure the mock to return a response with an OK status code.\n mock_get.return_value.ok = True\n\n # Call the service, which will send a request to the server.\n response = get_todos()\n\n # If the request is sent successfully, expect a response to be returned.\n assert response is not None", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"", "def _register_uri(self, method, uri=test_uri, data=None, body=None,\n **kwds):\n if data is None:\n data = self.test_data\n if body is None:\n body = json.dumps(data)\n httpretty.register_uri(method, uri=uri, body=body, **kwds)", "def test_get_next_to_arrive(self, mock_requests):\n\n r = services.get_next_to_arrive(self.a, self.b)\n params = {'req1': self.a, 'req2': self.b}\n\n self.assertTrue(\n mock.call.get(services.SEPTA_NEXTTOARRIVE_URL, params=params) in\n mock_requests.mock_calls)", "def setUp(self):\n self.client = HTTPClient()\n self.method = 'GET'\n self.url = 'http://github.com/ojengwa'\n self.headers = {}", "def test_api_requests_error_status(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('GET', url, status=500, body=b'some xml and stuff')\n with pytest.raises(Exception) as e:\n with mock.patch('salesforce_bulk_api.time.sleep') as sleep:\n new_job.request('get', url, 
expected_response=200)\n assert sleep.call_count == 0\n assert 'Unexpected status 500' in str(e)", "def testEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # With no UID given\n status, _ = self._http_get(\"/endpoint\")\n\n # Check result\n self.assertEqual(status, 404)\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Request the details of the endpoint\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 200)\n\n # Check the content\n data = json.loads(response)\n for key, attr in (('uid', 'uid'), ('sender', 'framework'),\n ('name', 'name')):\n self.assertEqual(data[key], getattr(endpoint, attr))\n\n # Unregister it\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, _ = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 404)", "def setUp(self):\n self.client = RequestsClient()\n self.method = 'GET'\n self.url = 'http://github.com/ojengwa'\n self.headers = {}" ]
[ "0.7003493", "0.6884988", "0.6701029", "0.66182", "0.6446721", "0.6388721", "0.6341374", "0.62644625", "0.62517893", "0.625138", "0.62413937", "0.6225622", "0.6221288", "0.6194367", "0.61357135", "0.61174285", "0.6117166", "0.609931", "0.60828453", "0.607748", "0.6064459", "0.6030262", "0.5997931", "0.59927976", "0.59871954", "0.5979991", "0.59691334", "0.59507143", "0.5941582", "0.5941081", "0.5938535", "0.5934949", "0.59048134", "0.5900024", "0.58984864", "0.5895186", "0.58923835", "0.5870918", "0.5860004", "0.58532506", "0.5844947", "0.58273035", "0.58136165", "0.58060795", "0.58050936", "0.5802016", "0.5798286", "0.57979506", "0.5787216", "0.57870835", "0.5783253", "0.57829845", "0.5763248", "0.5762912", "0.57533383", "0.57374865", "0.5729866", "0.57285196", "0.5725374", "0.5724354", "0.57236445", "0.57069314", "0.57027507", "0.56986684", "0.56935567", "0.5681573", "0.5680394", "0.56763506", "0.567528", "0.5670802", "0.56663096", "0.56662107", "0.5653378", "0.5649741", "0.5645701", "0.5632357", "0.562928", "0.562823", "0.5626985", "0.5626886", "0.56256795", "0.5625196", "0.56199205", "0.5617034", "0.5613021", "0.5606131", "0.56051016", "0.5603124", "0.56019807", "0.5598001", "0.55914044", "0.5589855", "0.55825853", "0.55820906", "0.55800086", "0.5579597", "0.55755645", "0.5573898", "0.5571504", "0.55714697" ]
0.5968934
27
Return a fake assertion after checking that the input is what we expect.
def assertion_callback(_request, _uri, headers):
    assert b'private_key=fake_private_key_here' in _request.body
    assert b'user_id=myself' in _request.body
    assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body
    assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body
    return (200, headers, 'fake_saml_assertion')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_func(input, expected):\n from parenthetics import paren\n assert paren(input) == expected", "def test_example():\n answer = True\n expected = True\n assert answer == expected", "def expected_value(expected, actual):\n assert expected == actual", "def validate_Assert(result, _dummy_condition):\n return result", "def _expected_inputs():", "def test_assert_truth(self):\n\n # Confused? This video should help:\n #\n # http://bit.ly/about_asserts\n\n self.assertTrue(True) # This should be true", "def test_mock_input():\n mock_message = mock.Mock()\n mock_words = mock.Mock()\n with pytest.raises(TypeError):\n message_checker(mock_message, mock_words)", "def test_absolute_truth():\n assert True", "def _check(self, expected, actual):\n\n assert expected == actual, 'Assert fail. expected={} but actual={}'.format(expected, actual)", "def assertion_passed(self, func):", "async def test_assertion_rewriting(_):\n with pytest.raises(AssertionError) as e:\n assert 1 == 42\n assert \"42\" in str(e), f\"Assertion rewriting seems not to work, message was {e}\"", "def check_input_matches_expected_output(in_, out):\n ...", "def test_bad_input(alice):\n with pytest.raises(TypeError):\n alice.pack(\"blah\")", "def test_expect_ok(self) -> None:\n assert Ok(2).expect(\"err\") == 2", "def _expected_inputs():\n return 1", "def assertOutput(cls, expected, actual):\n if expected != actual:\n raise Exception(\"'\" + expected + \"' != '\" + actual + \"'\")", "def test_fun(email: str, expected: bool):\n assert fun(email) is expected", "def test_ask_yes_no_5(self, input_mock):\n input_mock.side_effect = [\"invalid\", \"invalid\", \"Y\"]\n response = basic.ask_yes_no(response_attempt=3)\n self.assertTrue(response)", "def do_test_expected(self):\n self.maxDiff = None\n\n got = \"\"\n if 'error' in test_src:\n self.assertRaises(test_src['error'], tested_function,\n test_src['in'], options)\n else:\n want = test_src['out']\n got = tested_function(test_src['in'], options)\n logging.debug('got = type %s', type(got))\n logging.debug(\"test_src['out'] = %s\",\n unicode(test_src['out']))\n self.assertEqual(got, want, \"\"\"Result matches\n expected = %s\n\n observed = %s\n \"\"\" % (want, got))", "def test_exactly(self):\n\n x = t.Exactly(\"x\")\n self.assertEqual(writePython(x),\n dd(\"\"\"\n _G_exactly_1, lastError = self.exactly('x')\n self.considerError(lastError, None)\n _G_exactly_1\n \"\"\"))", "def boolean():\n\n bool(Assert(1))\n\n with Assert.raises(AssertionError):\n bool(Assert(0))", "def test_raw_input_ex(input_output):\n with mock.patch.object(builtins, 'input', lambda _: input_output):\n assert GC.raw_input_ex() == input_output", "def __my_assert_args(function, args, expected_output, check_type=False):\n argstr = str(args).replace(',)', ')')\n output = function(*args)\n\n # Controleer eerst het return-type (optioneel)\n if check_type:\n msg = f\"Fout: {function.__name__}{argstr} geeft geen {type(expected_output)} terug als return-type\"\n assert type(output) is type(expected_output), msg\n\n # Controleer of de functie-uitvoer overeenkomt met de gewenste uitvoer\n if str(expected_output) == str(output):\n msg = f\"Fout: {function.__name__}{argstr} geeft {output} ({type(output).__name__}) \" \\\n f\"in plaats van {expected_output} (type {type(expected_output).__name__})\"\n else:\n msg = f\"Fout: {function.__name__}{argstr} geeft {output} in plaats van {expected_output}\"\n\n if type(expected_output) is float and isinstance(output, (int, float, complex)):\n # Vergelijk bij float als return-type op 7 
decimalen om afrondingsfouten te omzeilen\n assert round(output - expected_output, 7) == 0, msg\n else:\n assert output == expected_output, msg", "def test_single_value(self, test_input, expected, sc):\n assert sc.add(test_input) == expected", "def test_should_be_ok(self):\n self.assertTrue(True)", "def test_of_with_args(self) -> None:\n assert Result.of(lambda x: bool(x > 0), 1).unwrap() is True", "def boolean():\r\n\r\n bool(Assert(1))\r\n\r\n with Assert.raises(AssertionError):\r\n bool(Assert(0))", "def _expected_inputs():\n return 3", "def test_allowed(self):\n user_input = [\"1\",\"1\",\"0\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You can take this course.\")", "def test_input1(self):\n in1 =\"elephant\"\n result = options.check(in1)\n self.assertEqual(result,True)", "def test_111(self):\n user_input = [\"1\",\"1\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_input2(self):\n in1 =\"aple\"\n result = options.checkw(in1)\n self.assertEqual(result,False)", "def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)", "def test_001(self):\n user_input = [\"0\",\"0\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def assert_verbose(actual, expected):\n assert expected == actual, f\"Expected value: {expected}. 
But actual value is {actual}\"", "def test_011(self):\n user_input = [\"0\",\"1\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_000(self):\n user_input = [\"0\",\"0\",\"0\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_101(self):\n user_input = [\"1\",\"0\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_100(self):\n user_input = [\"1\",\"0\",\"0\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_check_is_required(fake_check):\n assert fake_check.is_required()", "def test_010(self):\n user_input = [\"0\",\"1\",\"0\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_validate_input_rejection(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('000011')", "def _assert_raises_assertion(expected_message):\n\n class Context(object):\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is None:\n raise AssertionError(\"no AssertionError raised\")\n if exc_type != AssertionError:\n return False\n if str(exc_val) != expected_message:\n raise AssertionError(\n \"expected exception message {!r}, got {!r}\".format(\n expected_message, str(exc_val)\n )\n )\n return True\n\n return Context()", "def test_return_final_seq_user_input_valid():\n for valid_case in [True, False]:\n assert RNN(layers_info=[[\"gru\", 20], [\"lstm\", 8], [\"linear\", 7]],\n hidden_activations=\"relu\", initialiser=\"xavier\", return_final_seq_only=valid_case, input_dim=15)\n\n for invalid_case in [[True], 22, [1, 3], (True, False), (5, False)]:\n with pytest.raises(AssertionError):\n print(invalid_case)\n RNN(layers_info=[[\"gru\", 20], [\"lstm\", 8], [\"linear\", 7]],\n hidden_activations=\"relu\", initialiser=\"xavier\", return_final_seq_only=invalid_case, input_dim=15)", "def test_is_ok(self) -> None:\n assert Ok(1).is_ok()\n assert not Err(1).is_ok()", "def _verify(\n hass,\n expected_state,\n expected_percentage,\n expected_oscillating,\n expected_direction,\n expected_preset_mode,\n):\n state = hass.states.get(_TEST_FAN)\n attributes = state.attributes\n assert state.state == str(expected_state)\n assert attributes.get(ATTR_PERCENTAGE) == expected_percentage\n assert attributes.get(ATTR_OSCILLATING) == expected_oscillating\n assert attributes.get(ATTR_DIRECTION) == expected_direction\n assert attributes.get(ATTR_PRESET_MODE) == expected_preset_mode", "def check(input, options):\r\n expected = [(o, o) for o in options]\r\n self.assertEqual(f(input), expected)", "def test_task178e(input_value, expected_value):\r\n assert algo.Task178e.main_logic(input_value) == 
expected_value", "def assert_allclose_cast(actual, desired, rtol=1e-7, atol=0):\n if actual.dtype.kind == 'O':\n dtype = np.array(actual.flat[0]).dtype\n actual, desired = actual.astype(dtype), desired.astype(dtype)\n assert_allclose(actual, desired, rtol, atol)", "def test_example():\n x = 0\n y = 1\n assert x != y", "def test_ask_yes_no_2(self, input_mock):\n response = basic.ask_yes_no()\n self.assertFalse(response)", "def be(self, expected):\n self.do_assertion(self.subject == expected, \"be\", expected)\n return self", "def test_utils_to_bool(self, tcex, input_, expected):\n result = tcex.utils.to_bool(input_)\n assert result == expected, f'Input {input_} result of {result} != {expected}'", "def test_ask_yes_no_1(self, input_mock):\n response = basic.ask_yes_no()\n self.assertTrue(response)", "def test_valid_input_succeeds(self, async_patch, chan_patch):\n self.assertTrue(send_rotate_to_can(self.USER, self.BIN_NUM))\n async_patch.assert_called_once()\n chan_patch.assert_called_once()", "def assert_same(result, expect):\n assert sorted(result) == sorted(expect)", "def test_validate_answer(self):\r\n problem = self.build_problem(answer=\"42\")\r\n responder = problem.responders.values()[0]\r\n self.assertTrue(responder.validate_answer('23.5'))\r\n self.assertFalse(responder.validate_answer('fish'))", "def verify_ret(self, ret, expected_ret):\n assert ret == expected_ret, (\n \"Function should return: \"\n + ret_vals_dictionary[expected_ret]\n + \".\\nInstead returned: \"\n + ret_vals_dictionary[ret]\n )", "def test_calculate_contract_fee(a, b, expected):\n assert calculate_contract_fee(a, b) == expected", "def test_check_inputs():\n # Check inputs list - Pass\n assert layer_util.check_inputs([], 0) is None\n\n # Check inputs tuple - Pass\n assert layer_util.check_inputs((), 0) is None\n\n # Check inputs int - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs(0, 0)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple\" in msg\n\n # Check inputs float - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs(0.0, 0)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple\" in msg\n\n # Check size float - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs([1], 0.5)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n\n # Check size 0 - Pass\n assert layer_util.check_inputs([], 0) is None\n assert layer_util.check_inputs((), 0) is None\n\n # Check size 0 - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs([0], 0)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs((0,), 0)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n\n # Check size 1 - Pass\n assert layer_util.check_inputs([0], 1) is None\n assert layer_util.check_inputs((0,), 1) is None\n\n # Check size 1 - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs([], 1)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs((), 1)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n\n # Check msg 
spacing - Pass\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs(0, 0, msg=\"Start of message\")\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Start of message\" in msg", "def assert_that(matchee, matcher, message='', verbose=False):\n matcher = Annotate.if_message(message, matcher)\n mismatch = matcher.match(matchee)\n if not mismatch:\n return\n raise MismatchError(matchee, matcher, mismatch, verbose)", "def test_valid_email():\n assert_equals(is_valid_email(\"[email protected]\") is None, False)", "def test_check_validity(game):\n\n game.solve()\n assert game.check_validity()", "def test_example():\n with pytest.raises(\n AssertionError,\n match=expected_error_match,\n ):\n actual = {\n \"test1\": 1,\n \"test2\": \"foo\",\n \"bar\": {\"cheese\": \"parrot\", \"rabbit\": [\"black\", \"knight\"], \"other\": \"oops\"},\n }\n assert actual == Alike(\n {\n \"something\": A.is_missing,\n \"test2\": \"foo\",\n \"test1\": A < 2,\n \"bar\": {\n \"cheese\": A.is_present,\n \"rabbit\": [\"black\", \"wrong\"],\n \"other\": A.is_missing,\n },\n }\n )", "def _Assert(self, t):\n self.RaiseError(t, \"Assert not supported\")", "def equality():\r\n\r\n Assert(1) == 1\r\n Assert(1) != 0\r\n\r\n with Assert.raises(AssertionError):\r\n Assert(1) == 0\r\n\r\n with Assert.raises(AssertionError):\r\n Assert(1) != 1", "def test_validates(if_statement_validator):\n test = {\n 'condition': 'is',\n 'target': 'bob',\n 'then': 'arne',\n }\n assert if_statement_validator(test).unwrap() == test", "def _value_assert(current_key, actual_value, expected_value):\n if actual_value is None:\n return\n if isinstance(actual_value, list) and isinstance(expected_value, list):\n _list_assert(actual_value, expected_value)\n elif isinstance(actual_value, dict) and isinstance(expected_value, dict):\n _dict_assert(actual_value, expected_value)\n else:\n assert actual_value == expected_value, \"key: {}\".format(current_key)", "def equality():\n\n Assert(1) == 1\n Assert(1) != 0\n\n with Assert.raises(AssertionError):\n Assert(1) == 0\n\n with Assert.raises(AssertionError):\n Assert(1) != 1", "def do_test_expected(self):\n self.maxDiff = None\n\n # We currently don't throw any exceptions in Writer, so this\n # this is always false\n if 'error' in test_src:\n self.assertRaises(test_src['error'], yamlish.dumps,\n test_src['in'], options)\n else:\n logging.debug(\"out:\\n%s\", textwrap.dedent(test_src['out']))\n want = yaml.load(textwrap.dedent(test_src['out']))\n logging.debug(\"want:\\n%s\", want)\n with tempfile.NamedTemporaryFile() as test_file:\n tested_function(test_src['in'], test_file)\n test_file.seek(0)\n got_str = test_file.read()\n logging.debug(\"got_str = %s\", got_str)\n got = yaml.load(got_str)\n self.assertEqual(got, want, \"Result matches\")", "def check_result(context, expected):\n assert context.result == expected, \"Wrong result: {r} != {e}\".format(\n r=context.result, e=expected\n )", "def expected(x, y):", "def expected(x, y):", "def expected(x, y):", "def assert_state(actual: State | None, expected: State | None) -> None:\n if actual is None or expected is None:\n assert actual == expected\n return\n assert actual.entity_id == expected.entity_id\n assert actual.state == expected.state\n assert actual.attributes == expected.attributes", "def test_validate_input_valid(self):\n final_config = self.dtm1.validate_input('00001111')\n nose.assert_equal(final_config[0], 'q4')\n nose.assert_equal(str(final_config[1]), 'TMTape(\\'xxxxyyyy.\\')')", "def _assert_wrapper(obj1, obj2, 
expected_type=None, do_raise=False, **kwargs):\n try:\n _assert_equal(obj1, obj2, expected_type=expected_type, **kwargs)\n except AssertionError:\n if do_raise or hasattr(brewtils.test, \"_running_tests\"):\n raise\n return False\n\n return True", "def test_assert_bytes():\n if backwards.PY2: # pragma: Python 2\n # backwards.assert_bytes(bytearray('hello', 'utf-8'))\n backwards.assert_bytes('hello')\n nt.assert_raises(AssertionError, backwards.assert_bytes,\n unicode('hello'))\n else: # pragma: Python 3\n # backwards.assert_bytes(bytearray('hello', 'utf-8'))\n backwards.assert_bytes(b'hello')\n nt.assert_raises(AssertionError, backwards.assert_bytes,\n 'hello')", "def assertValue(self, indata, expected_output, message=None):\n outstream = StringIO()\n giganticGrep(indata, outstream)\n value = outstream.getvalue()\n self.assertEqual(value, expected_output, message)", "def verify():", "def assert_is(self, first, second, msg=None):\r\n assert first is second", "def verify_is(self, first, second, msg=None):\r\n try:\r\n self.assert_is(first, second, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def fake_input(inputs):\n it = iter(inputs)\n def mock_input(prompt=''):\n try:\n return next(it)\n except StopIteration as e:\n raise EOFError('No more inputs given') from e\n\n return patch('builtins.input', mock_input)", "def test__validate_message_notification__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_message_notification(input_value)", "def _assert(condition, message):\n if not condition:\n raise AssertionError(message)", "def _not_assert(function):\n @wraps(function)\n def flipped(*args, **kwargs):\n try:\n function(*args, **kwargs)\n raise AssertionError()\n except AssertionError:\n return\n return flipped", "def assert_equal(self, first, second, msg=\"\"):\r\n assert first == second", "def make_assertions(input_pipe, other_pipes, output_pipe):\n assert isinstance(input_pipe, elements.InPypElement), 'Wrong input element type, want a InPypElement!'\n assert isinstance(output_pipe, elements.OutPypElement), 'Wrong output element type, want a OutPypElement!'\n for other_pipe in other_pipes:\n assert isinstance(other_pipe, elements.MidPypElement), 'Wrong middle element type, want a MidPypElement!'", "def get_verified_input(prompt, verify_by_func, msg_wrong=None):\n if msg_wrong is None:\n # stock error message\n msg_wrong = \"Invalid input.\"\n\n # raw\n answer = input_centered(prompt)\n # the answer must match conditions of verification\n while not verify_by_func(answer):\n sys_comment(msg_wrong, is_error=True)\n answer = input_centered(prompt)\n \n # at this point, the answer is verified to be valid\n return answer", "def test_task555(input_value, expected_value):\r\n assert list(algo.Task555.main_logic(input_value)) == expected_value", "def assert_equals(expected,received,message=None):\n if (expected != received):\n if message is None:\n message = 'assert_equals: expected %s but instead got %s' % (repr(expected),repr(received))\n quit_with_error(message)", "def verify(case_name, test_input, test_target, test_func):\n actual_output = test_func(*test_input)\n print(case_name, test_input, ' target:', test_target,\n ' output:', actual_output)\n assert(test_target == actual_output)", "def verify(case_name, test_input, test_target, test_func):\n actual_output = test_func(*test_input)\n print(case_name, test_input, ' target:', test_target,\n ' 
output:', actual_output)\n assert(test_target == actual_output)", "def assert_almost_equal(actual, desired, decimal=7):\n actual, desired = check_and_drop_units(actual, desired)\n numpy.testing.assert_almost_equal(actual, desired, decimal)", "def test_always_succeed():\n assert True", "def test_out_of_order(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"tan\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')", "def test_casting_without_iterable(test_fixture, test_input, expected):\n test_fixture.cast_prop = test_input\n assert test_input == test_fixture.cast_prop == expected\n assert type(test_fixture.cast_prop) == type(expected)", "def test_expect_raising(self, exc_cls: t.Type[Exception]) -> None:\n exp_exc: t.Type[Exception] = exc_cls if exc_cls else RuntimeError\n kwargs = {\"exc_cls\": exc_cls} if exc_cls else {}\n input_val = 2\n msg = \"not what I expected\"\n\n with pytest.raises(exp_exc) as exc_info:\n Err(input_val).expect(msg, **kwargs)\n\n assert msg in str(exc_info.value)\n assert str(input_val) in str(exc_info.value)", "def test_inputs_are_needed():\n with pytest.raises(TypeError):\n song_decoder()", "def test_validate_characters(self, input_string, expected_result):\n with patch('sys.exit', autospec=True) as m_sys_exit:\n # Call method under test\n test_result = validate_characters(input_string)\n\n # Assert expected result\n self.assertEqual(expected_result, test_result)", "def test_task88d(input_value, expected_value):\r\n assert algo.Task88d.main_logic(input_value) == expected_value" ]
[ "0.6541787", "0.647255", "0.6471959", "0.64491946", "0.64080536", "0.62548614", "0.62316114", "0.6227208", "0.61887175", "0.6130757", "0.61267287", "0.6095201", "0.60947305", "0.6071231", "0.60602826", "0.6057711", "0.6053218", "0.60242915", "0.6014357", "0.5987886", "0.5969211", "0.59456927", "0.5938013", "0.5937278", "0.5883519", "0.5876175", "0.58749056", "0.5872409", "0.5868103", "0.5866499", "0.58560526", "0.58387953", "0.57730246", "0.5769951", "0.57659906", "0.5762643", "0.5751014", "0.57367736", "0.57200414", "0.57130563", "0.5712026", "0.5710498", "0.57017064", "0.5679358", "0.5672377", "0.56684744", "0.56666434", "0.566446", "0.5656972", "0.56372577", "0.5635428", "0.5630137", "0.5616431", "0.5611828", "0.56053597", "0.560503", "0.5598065", "0.55889773", "0.55838335", "0.55784595", "0.55611753", "0.55604416", "0.5547329", "0.554601", "0.55387384", "0.55338484", "0.5524319", "0.55229145", "0.55168974", "0.551314", "0.5510241", "0.55088323", "0.55088323", "0.55088323", "0.54949325", "0.5492912", "0.54896885", "0.54794925", "0.54762524", "0.5474996", "0.54746413", "0.54694146", "0.54683304", "0.54642564", "0.54604065", "0.54525083", "0.54519886", "0.5450068", "0.54496694", "0.54464597", "0.5444341", "0.54438895", "0.54438895", "0.5442366", "0.5440321", "0.54375154", "0.5436624", "0.5430176", "0.5429492", "0.5428825", "0.54287416" ]
0.0
-1
Return a 404 error when someone tries to call the URL.
def bad_callback(_request, _uri, headers):
    return (404, headers, 'NOT AN ASSERTION')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def not_found():\n return HttpError(404)", "def error_404(error):\n return 'Bummer, there is nothing at this URL.'", "def not_found():\n raise cherrypy.HTTPError(404, \"Not Found.\")", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def error_404(error):\n return '404 Error'", "def page_not_found(err):\n return error_formatter(code='404', details=err, parm1=request.path)", "def error_not_found(error):\n return 'No page here, dood. 404!', 404", "def not_found(self, request):\n fmt = 'The path <q>%s</q> was not understood by this server.'\n msg = fmt % (request['parsed_uri'],)\n openid_url = request['query'].get('openid_url')\n return self.render(request, msg, 'error', openid_url, status='404 Not Found')", "def handleStatus_404(self):\n log.err('HTTP Error 404')", "def page_not_found(e):\n return jsonify({\"error\": \"Not found\"}), 404", "def page_not_found(e):\n return jsonify({\"error\": \"Not found\"}), 404", "def page_not_found(e):\n return 'Þessi vefslóð er ekki rétt', 404", "def error_404(error):\n return 'Data Service Error'", "def test_get_404(self):\n url = self.baseurl + \"/do-not-implement-this-page-it-is-not-found\"\n try:\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( False, \"Should have thrown an HTTP Error!\")\n except urllib2.HTTPError as e:\n self.assertTrue( e.getcode() == 404 , (\"404 Not FOUND! 
%d\" % e.getcode()))\n else:\n self.assertTrue( False, \"Another Error was thrown!\")", "def normal404(e):\n return jsonify({\"error\": \"Not found\"}), 404", "def not_found(self, error):\n return jsonify({'error': 'NOT FOUND'}), 404", "def not_found(e):\n return render_template(\"errors/404.html\"), 404", "def page_not_found(e):\n return render_template('404.html'), 404", "def page_not_found(e):\n return render_template('404.html'), 404", "def page_not_found(e):\n return render_template(\"404.html\"), 404", "def page_not_found(e):\n return render_template(\"error/404.html\"), 404", "def page_not_found():\n return render_template(\"errors/404.html\"), 404", "def handle_notfound( environ ):\n return 404, [], make_error( 'Not Found', environ[ 'PATH_INFO' ] )", "def view_404(request, url = None):\n res = render_to_response(\"404.html\", {\"PAGE_URL\": request.get_full_path()},context_instance=RequestContext(request))\n res.status_code = 404\n return res", "def page_not_found(e):\n\n # Respons to api request\n if request.accept_mimetypes.accept_json and \\\n not request.accept_mimetypes.accept_html:\n resp = jsonify({'error': 'not found'})\n resp.status_code = 404\n return resp\n\n return render_template('errors/404.html'), 404", "def page_not_found(er):\n return render_template('errors.html'), 404", "def not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)", "def page_not_found(error):\n return 'Esta Pagina no existe', 404", "def error404(e) -> tuple:\n return render_template('404.html'), 404", "def page_not_found(_):\n return ANSWER_PAGE_NOT_FOUND, 404", "def page_not_found(_error):\n return render_template('404.html'), 404", "def resource_not_found(exc, request):\r\n request.response_status = \"404 Not Found\"\r\n return {'message': str(exc)}", "def not_found(error):\n return make_response(jsonify({'error': 'Resource not found'}), 404)", "def page_not_found(e):\n # Message to the user\n message = {\n \"err\":\n {\n \"msg\": \"This route is currently not supported. 
Please refer API documentation.\"\n }\n }\n # Making the message looks good\n resp = jsonify(message)\n # Sending OK response\n resp.status_code = 404\n # Returning the object\n return resp", "def page_not_found(e):\n\n return render_template('404.html'), 404", "def not_found(error): # pylint: disable=unused-argument\n response = jsonify(\n {\"success\": False, \"error_code\": 404, \"message\": \"Not Found\"}\n )\n return response, 404", "def not_found(error=None):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url\n }\n resp = jsonify(message)\n resp.status_code = 404\n\n return resp", "def not_found(error):\n pass", "def badRequest(message):\r\n raise Http404(message)", "def handle_404(request):\n return handle_error(request, django.http.HttpResponseNotFound(),\n \"404.html\")", "def error404(ex):\n # logger.error(ex)\n return \"error 404 : {0}\".format(ex.body)", "def test_not_existing_url(client):\n response = client.get('/not-exists')\n assert response.status_code == 404", "def response_not_found():\n\n return b\"HTTP/1.1 404 Not Found\\r\\n\"", "def test_make_request_error(self):\n response = Helper.make_request(self.url_404)\n self.assertEqual(response.status_code, 404)", "def display_404(error):\n return render_template('/error.html'), 404", "def error_404(self):\n response = self.render_template('404.html')\n response.status_code = 404\n return response", "def test_not_found(self):\n self._error_test(fitbit_exceptions.HTTPNotFound)", "def handler404(request):\n response = render_to_response('404.html', {})\n response.status_code = 404 # Other errors can be similarly configured\n return response", "def view(self, url):\r\n abort(404)", "def page_not_found(e):\n return render_template(\"404.html\", page_title=404)", "def _page_not_found():\n return render_template(\n \"error.html\",\n title=\"Page Not Found\"\n ), 404", "def page_not_found(er): \n return render_template('errors.html'), 400", "def page_not_found(e):\n return render_template('404.html')", "def page_not_found(e):\n return jsonify({\"error\": \"page not found\"})", "def not_found_error(error):\n current_app.logger.info(error)\n return error, \"404\"", "def page_not_found(e):\n\treturn 'Sorry, No Valid Game There.', 404", "def not_found(environ, start_response):\n start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])\n return [str.enocde('Not Found')]", "def handler404(request, *args, **argv):\n response = render_to_response('404.html', {})\n response.status_code = 404\n return response", "def page_not_found(error):\n\n return render_template('/errors/404.html'), 404", "def test_404(self):\n response = self.make_call(origin='Milano Lambrate', destination='Milano Cadorna')\n self.assert400(response)", "def send404(self):\n\t\tself.send_error(404, \"File not found\")", "def url_error():\n try:\n from urllib.error import URLError\n except ImportError:\n from urllib2 import URLError # suppress(import-error)\n\n return URLError", "def not_found_404(error):\n return jsonify({\n 'success': False,\n 'message': 'Resource not found',\n 'error': 404\n }), 404", "def page_not_found(error):\n return '<h1> 404 - Not Found</h1>', 404", "def page_not_found(path):\n return jsonify({\"message\": \"Page not found\"}), HTTPStatus.NOT_FOUND.value", "def test_request_returns_404(client):\n assert client.get(\"/url_que_nao_existe\").status_code == 404", "def error_bad_url(self):\n self._error(400, \"Bad Request\")", "def page_not_found(error):\n return render_template('error.html', error_msg=\"404 Page Not 
Found\", pagetitle=\"404 Page Not Found\"), 404", "def not_found(error):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n 'error': error}\n resp = jsonify(message)\n return resp", "def page_not_found(error):\n return render_template(\"page_not_found.html\"), 404", "def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data", "def not_found_error(error):\n return render_template('errors/404.html'), 404", "def not_found(error):\n\n return render_template('errors/404.html'), 404", "def err404():\n return render_template('404.html', year=datetime.now().year)", "def handle_not_found(exception):\n return jsonify({\n 'message': 'Resource not found'\n }), 404", "def not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"resource not found\"\n }), 404", "def view(self, url):\n abort(404)", "def page_not_found(request, exception): # noqa\n return render(request, 'misc/404.html', status=404)", "def error_404(error):\n\n # Delete the error variable as unused\n del error\n # Render 404 page\n return render_template('404.html'), 404", "def url_was_found(url=\"localhost:5000/health\"):\n res = requests.get(url).json()\n\n if res['status_code'] == 200:\n return True\n elif res['status_code'] == 404:\n return False\n else:\n raise UnexpectedResponseError(\"Expected 200 OK or 404, got {}.\\n\".format(res['status']), \"Full response : {}\".format(res))", "def error(self, code, message = ''):\n self.response.set_status(404)\n raise Exception(message)", "def error():\n return render_template(\"404.html\")", "def _request(self, url):\n response = requests.get(url, headers=self.header)\n\n if str(response.status_code).startswith('2'):\n return response\n\n raise Exception(\"URI request returned an error. Error Code \" + str(response.status_code))", "def page_not_found(e):\n # Message to the articles\n message = {\n \"err\":\n {\n \"msg\": \"This route is currently not supported. Please refer API documentation (on Github https://github.com/wdelenclos/messier-registry.)\"\n }\n }\n # Making the message looks good\n resp = jsonify(message)\n # Sending OK response\n resp.status_code = 404\n # Returning the article\n return resp", "def handler404(request):\n response = render_to_response('404.html', {}, RequestContext(request))\n response.status_code = 404\n return response", "def page_not_found(er):\n return render_template('errors.html'), 500", "def invalid_route(e):\n return render_template(\"404.html\")" ]
[ "0.77854127", "0.7611726", "0.7589559", "0.7564481", "0.7564481", "0.7564481", "0.7564481", "0.7564481", "0.7564481", "0.7564481", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.7528948", "0.74066204", "0.73611426", "0.73296165", "0.7238029", "0.71652734", "0.7152119", "0.7152119", "0.7143972", "0.70972544", "0.7094572", "0.7076098", "0.7047391", "0.70409393", "0.7040103", "0.7040103", "0.70378816", "0.703776", "0.7036967", "0.70340556", "0.7033511", "0.70303756", "0.7015987", "0.70091677", "0.7009127", "0.7005594", "0.70034933", "0.699273", "0.69904333", "0.6983367", "0.69816476", "0.6954897", "0.6937298", "0.6931408", "0.69301456", "0.6917437", "0.6914702", "0.6902473", "0.6894535", "0.6891675", "0.68841237", "0.6880293", "0.68710077", "0.6857003", "0.6834328", "0.68160415", "0.6815824", "0.680971", "0.68088675", "0.67941743", "0.6778945", "0.67750674", "0.6772455", "0.6770765", "0.6759465", "0.6758592", "0.67512524", "0.6750709", "0.67486495", "0.6746673", "0.6742853", "0.67338115", "0.6713135", "0.6699819", "0.6697061", "0.6696493", "0.6690446", "0.66693294", "0.666275", "0.6662148", "0.6640806", "0.6637104", "0.6632679", "0.66305995", "0.6612096", "0.6605409", "0.66049296", "0.6604624", "0.66013026", "0.65792185", "0.65767133", "0.65679747", "0.65631294", "0.65254205" ]
0.0
-1
Return a fake assertion after checking that the input is what we expect.
def token_callback(_request, _uri, headers):
    assert b'assertion=fake_saml_assertion' in _request.body
    assert b'company_id=NCC1701D' in _request.body
    assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body
    assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body
    return (200, headers, '{"access_token": "faketoken"}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_func(input, expected):\n from parenthetics import paren\n assert paren(input) == expected", "def test_example():\n answer = True\n expected = True\n assert answer == expected", "def expected_value(expected, actual):\n assert expected == actual", "def validate_Assert(result, _dummy_condition):\n return result", "def _expected_inputs():", "def test_assert_truth(self):\n\n # Confused? This video should help:\n #\n # http://bit.ly/about_asserts\n\n self.assertTrue(True) # This should be true", "def test_mock_input():\n mock_message = mock.Mock()\n mock_words = mock.Mock()\n with pytest.raises(TypeError):\n message_checker(mock_message, mock_words)", "def test_absolute_truth():\n assert True", "def _check(self, expected, actual):\n\n assert expected == actual, 'Assert fail. expected={} but actual={}'.format(expected, actual)", "def assertion_passed(self, func):", "async def test_assertion_rewriting(_):\n with pytest.raises(AssertionError) as e:\n assert 1 == 42\n assert \"42\" in str(e), f\"Assertion rewriting seems not to work, message was {e}\"", "def test_bad_input(alice):\n with pytest.raises(TypeError):\n alice.pack(\"blah\")", "def check_input_matches_expected_output(in_, out):\n ...", "def test_expect_ok(self) -> None:\n assert Ok(2).expect(\"err\") == 2", "def _expected_inputs():\n return 1", "def assertOutput(cls, expected, actual):\n if expected != actual:\n raise Exception(\"'\" + expected + \"' != '\" + actual + \"'\")", "def test_fun(email: str, expected: bool):\n assert fun(email) is expected", "def test_ask_yes_no_5(self, input_mock):\n input_mock.side_effect = [\"invalid\", \"invalid\", \"Y\"]\n response = basic.ask_yes_no(response_attempt=3)\n self.assertTrue(response)", "def do_test_expected(self):\n self.maxDiff = None\n\n got = \"\"\n if 'error' in test_src:\n self.assertRaises(test_src['error'], tested_function,\n test_src['in'], options)\n else:\n want = test_src['out']\n got = tested_function(test_src['in'], options)\n logging.debug('got = type %s', type(got))\n logging.debug(\"test_src['out'] = %s\",\n unicode(test_src['out']))\n self.assertEqual(got, want, \"\"\"Result matches\n expected = %s\n\n observed = %s\n \"\"\" % (want, got))", "def test_exactly(self):\n\n x = t.Exactly(\"x\")\n self.assertEqual(writePython(x),\n dd(\"\"\"\n _G_exactly_1, lastError = self.exactly('x')\n self.considerError(lastError, None)\n _G_exactly_1\n \"\"\"))", "def boolean():\n\n bool(Assert(1))\n\n with Assert.raises(AssertionError):\n bool(Assert(0))", "def test_raw_input_ex(input_output):\n with mock.patch.object(builtins, 'input', lambda _: input_output):\n assert GC.raw_input_ex() == input_output", "def __my_assert_args(function, args, expected_output, check_type=False):\n argstr = str(args).replace(',)', ')')\n output = function(*args)\n\n # Controleer eerst het return-type (optioneel)\n if check_type:\n msg = f\"Fout: {function.__name__}{argstr} geeft geen {type(expected_output)} terug als return-type\"\n assert type(output) is type(expected_output), msg\n\n # Controleer of de functie-uitvoer overeenkomt met de gewenste uitvoer\n if str(expected_output) == str(output):\n msg = f\"Fout: {function.__name__}{argstr} geeft {output} ({type(output).__name__}) \" \\\n f\"in plaats van {expected_output} (type {type(expected_output).__name__})\"\n else:\n msg = f\"Fout: {function.__name__}{argstr} geeft {output} in plaats van {expected_output}\"\n\n if type(expected_output) is float and isinstance(output, (int, float, complex)):\n # Vergelijk bij float als return-type op 7 
decimalen om afrondingsfouten te omzeilen\n assert round(output - expected_output, 7) == 0, msg\n else:\n assert output == expected_output, msg", "def test_single_value(self, test_input, expected, sc):\n assert sc.add(test_input) == expected", "def test_should_be_ok(self):\n self.assertTrue(True)", "def test_of_with_args(self) -> None:\n assert Result.of(lambda x: bool(x > 0), 1).unwrap() is True", "def boolean():\r\n\r\n bool(Assert(1))\r\n\r\n with Assert.raises(AssertionError):\r\n bool(Assert(0))", "def _expected_inputs():\n return 3", "def test_allowed(self):\n user_input = [\"1\",\"1\",\"0\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You can take this course.\")", "def test_input1(self):\n in1 =\"elephant\"\n result = options.check(in1)\n self.assertEqual(result,True)", "def test_111(self):\n user_input = [\"1\",\"1\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_input2(self):\n in1 =\"aple\"\n result = options.checkw(in1)\n self.assertEqual(result,False)", "def test_wrong_input(self):\n\n test_float = 2954.02\n test_list = [\"anagram\", \"gramana\"]\n with pytest.raises(AttributeError) as exc_info:\n is_anagram(test_float, test_list)\n expected_error_msg = \"Words must be strings!\"\n assert exc_info.match(expected_error_msg)", "def test_001(self):\n user_input = [\"0\",\"0\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def assert_verbose(actual, expected):\n assert expected == actual, f\"Expected value: {expected}. 
But actual value is {actual}\"", "def test_011(self):\n user_input = [\"0\",\"1\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_000(self):\n user_input = [\"0\",\"0\",\"0\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_101(self):\n user_input = [\"1\",\"0\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_100(self):\n user_input = [\"1\",\"0\",\"0\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_check_is_required(fake_check):\n assert fake_check.is_required()", "def test_010(self):\n user_input = [\"0\",\"1\",\"0\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_validate_input_rejection(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('000011')", "def _assert_raises_assertion(expected_message):\n\n class Context(object):\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is None:\n raise AssertionError(\"no AssertionError raised\")\n if exc_type != AssertionError:\n return False\n if str(exc_val) != expected_message:\n raise AssertionError(\n \"expected exception message {!r}, got {!r}\".format(\n expected_message, str(exc_val)\n )\n )\n return True\n\n return Context()", "def test_return_final_seq_user_input_valid():\n for valid_case in [True, False]:\n assert RNN(layers_info=[[\"gru\", 20], [\"lstm\", 8], [\"linear\", 7]],\n hidden_activations=\"relu\", initialiser=\"xavier\", return_final_seq_only=valid_case, input_dim=15)\n\n for invalid_case in [[True], 22, [1, 3], (True, False), (5, False)]:\n with pytest.raises(AssertionError):\n print(invalid_case)\n RNN(layers_info=[[\"gru\", 20], [\"lstm\", 8], [\"linear\", 7]],\n hidden_activations=\"relu\", initialiser=\"xavier\", return_final_seq_only=invalid_case, input_dim=15)", "def test_is_ok(self) -> None:\n assert Ok(1).is_ok()\n assert not Err(1).is_ok()", "def _verify(\n hass,\n expected_state,\n expected_percentage,\n expected_oscillating,\n expected_direction,\n expected_preset_mode,\n):\n state = hass.states.get(_TEST_FAN)\n attributes = state.attributes\n assert state.state == str(expected_state)\n assert attributes.get(ATTR_PERCENTAGE) == expected_percentage\n assert attributes.get(ATTR_OSCILLATING) == expected_oscillating\n assert attributes.get(ATTR_DIRECTION) == expected_direction\n assert attributes.get(ATTR_PRESET_MODE) == expected_preset_mode", "def check(input, options):\r\n expected = [(o, o) for o in options]\r\n self.assertEqual(f(input), expected)", "def test_task178e(input_value, expected_value):\r\n assert algo.Task178e.main_logic(input_value) == 
expected_value", "def assert_allclose_cast(actual, desired, rtol=1e-7, atol=0):\n if actual.dtype.kind == 'O':\n dtype = np.array(actual.flat[0]).dtype\n actual, desired = actual.astype(dtype), desired.astype(dtype)\n assert_allclose(actual, desired, rtol, atol)", "def test_example():\n x = 0\n y = 1\n assert x != y", "def test_ask_yes_no_2(self, input_mock):\n response = basic.ask_yes_no()\n self.assertFalse(response)", "def be(self, expected):\n self.do_assertion(self.subject == expected, \"be\", expected)\n return self", "def test_utils_to_bool(self, tcex, input_, expected):\n result = tcex.utils.to_bool(input_)\n assert result == expected, f'Input {input_} result of {result} != {expected}'", "def test_ask_yes_no_1(self, input_mock):\n response = basic.ask_yes_no()\n self.assertTrue(response)", "def test_valid_input_succeeds(self, async_patch, chan_patch):\n self.assertTrue(send_rotate_to_can(self.USER, self.BIN_NUM))\n async_patch.assert_called_once()\n chan_patch.assert_called_once()", "def assert_same(result, expect):\n assert sorted(result) == sorted(expect)", "def test_validate_answer(self):\r\n problem = self.build_problem(answer=\"42\")\r\n responder = problem.responders.values()[0]\r\n self.assertTrue(responder.validate_answer('23.5'))\r\n self.assertFalse(responder.validate_answer('fish'))", "def verify_ret(self, ret, expected_ret):\n assert ret == expected_ret, (\n \"Function should return: \"\n + ret_vals_dictionary[expected_ret]\n + \".\\nInstead returned: \"\n + ret_vals_dictionary[ret]\n )", "def test_calculate_contract_fee(a, b, expected):\n assert calculate_contract_fee(a, b) == expected", "def test_check_inputs():\n # Check inputs list - Pass\n assert layer_util.check_inputs([], 0) is None\n\n # Check inputs tuple - Pass\n assert layer_util.check_inputs((), 0) is None\n\n # Check inputs int - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs(0, 0)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple\" in msg\n\n # Check inputs float - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs(0.0, 0)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple\" in msg\n\n # Check size float - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs([1], 0.5)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n\n # Check size 0 - Pass\n assert layer_util.check_inputs([], 0) is None\n assert layer_util.check_inputs((), 0) is None\n\n # Check size 0 - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs([0], 0)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs((0,), 0)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n\n # Check size 1 - Pass\n assert layer_util.check_inputs([0], 1) is None\n assert layer_util.check_inputs((0,), 1) is None\n\n # Check size 1 - Fail\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs([], 1)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs((), 1)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Inputs should be a list or tuple of size\" in msg\n\n # Check msg 
spacing - Pass\n with pytest.raises(ValueError) as execinfo:\n layer_util.check_inputs(0, 0, msg=\"Start of message\")\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"Start of message\" in msg", "def test_valid_email():\n assert_equals(is_valid_email(\"[email protected]\") is None, False)", "def assert_that(matchee, matcher, message='', verbose=False):\n matcher = Annotate.if_message(message, matcher)\n mismatch = matcher.match(matchee)\n if not mismatch:\n return\n raise MismatchError(matchee, matcher, mismatch, verbose)", "def test_check_validity(game):\n\n game.solve()\n assert game.check_validity()", "def test_example():\n with pytest.raises(\n AssertionError,\n match=expected_error_match,\n ):\n actual = {\n \"test1\": 1,\n \"test2\": \"foo\",\n \"bar\": {\"cheese\": \"parrot\", \"rabbit\": [\"black\", \"knight\"], \"other\": \"oops\"},\n }\n assert actual == Alike(\n {\n \"something\": A.is_missing,\n \"test2\": \"foo\",\n \"test1\": A < 2,\n \"bar\": {\n \"cheese\": A.is_present,\n \"rabbit\": [\"black\", \"wrong\"],\n \"other\": A.is_missing,\n },\n }\n )", "def _Assert(self, t):\n self.RaiseError(t, \"Assert not supported\")", "def equality():\r\n\r\n Assert(1) == 1\r\n Assert(1) != 0\r\n\r\n with Assert.raises(AssertionError):\r\n Assert(1) == 0\r\n\r\n with Assert.raises(AssertionError):\r\n Assert(1) != 1", "def test_validates(if_statement_validator):\n test = {\n 'condition': 'is',\n 'target': 'bob',\n 'then': 'arne',\n }\n assert if_statement_validator(test).unwrap() == test", "def _value_assert(current_key, actual_value, expected_value):\n if actual_value is None:\n return\n if isinstance(actual_value, list) and isinstance(expected_value, list):\n _list_assert(actual_value, expected_value)\n elif isinstance(actual_value, dict) and isinstance(expected_value, dict):\n _dict_assert(actual_value, expected_value)\n else:\n assert actual_value == expected_value, \"key: {}\".format(current_key)", "def equality():\n\n Assert(1) == 1\n Assert(1) != 0\n\n with Assert.raises(AssertionError):\n Assert(1) == 0\n\n with Assert.raises(AssertionError):\n Assert(1) != 1", "def do_test_expected(self):\n self.maxDiff = None\n\n # We currently don't throw any exceptions in Writer, so this\n # this is always false\n if 'error' in test_src:\n self.assertRaises(test_src['error'], yamlish.dumps,\n test_src['in'], options)\n else:\n logging.debug(\"out:\\n%s\", textwrap.dedent(test_src['out']))\n want = yaml.load(textwrap.dedent(test_src['out']))\n logging.debug(\"want:\\n%s\", want)\n with tempfile.NamedTemporaryFile() as test_file:\n tested_function(test_src['in'], test_file)\n test_file.seek(0)\n got_str = test_file.read()\n logging.debug(\"got_str = %s\", got_str)\n got = yaml.load(got_str)\n self.assertEqual(got, want, \"Result matches\")", "def check_result(context, expected):\n assert context.result == expected, \"Wrong result: {r} != {e}\".format(\n r=context.result, e=expected\n )", "def expected(x, y):", "def expected(x, y):", "def expected(x, y):", "def test_validate_input_valid(self):\n final_config = self.dtm1.validate_input('00001111')\n nose.assert_equal(final_config[0], 'q4')\n nose.assert_equal(str(final_config[1]), 'TMTape(\\'xxxxyyyy.\\')')", "def assert_state(actual: State | None, expected: State | None) -> None:\n if actual is None or expected is None:\n assert actual == expected\n return\n assert actual.entity_id == expected.entity_id\n assert actual.state == expected.state\n assert actual.attributes == expected.attributes", "def _assert_wrapper(obj1, obj2, 
expected_type=None, do_raise=False, **kwargs):\n try:\n _assert_equal(obj1, obj2, expected_type=expected_type, **kwargs)\n except AssertionError:\n if do_raise or hasattr(brewtils.test, \"_running_tests\"):\n raise\n return False\n\n return True", "def test_assert_bytes():\n if backwards.PY2: # pragma: Python 2\n # backwards.assert_bytes(bytearray('hello', 'utf-8'))\n backwards.assert_bytes('hello')\n nt.assert_raises(AssertionError, backwards.assert_bytes,\n unicode('hello'))\n else: # pragma: Python 3\n # backwards.assert_bytes(bytearray('hello', 'utf-8'))\n backwards.assert_bytes(b'hello')\n nt.assert_raises(AssertionError, backwards.assert_bytes,\n 'hello')", "def assertValue(self, indata, expected_output, message=None):\n outstream = StringIO()\n giganticGrep(indata, outstream)\n value = outstream.getvalue()\n self.assertEqual(value, expected_output, message)", "def verify():", "def assert_is(self, first, second, msg=None):\r\n assert first is second", "def verify_is(self, first, second, msg=None):\r\n try:\r\n self.assert_is(first, second, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def fake_input(inputs):\n it = iter(inputs)\n def mock_input(prompt=''):\n try:\n return next(it)\n except StopIteration as e:\n raise EOFError('No more inputs given') from e\n\n return patch('builtins.input', mock_input)", "def test__validate_message_notification__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_message_notification(input_value)", "def _assert(condition, message):\n if not condition:\n raise AssertionError(message)", "def assert_equal(self, first, second, msg=\"\"):\r\n assert first == second", "def _not_assert(function):\n @wraps(function)\n def flipped(*args, **kwargs):\n try:\n function(*args, **kwargs)\n raise AssertionError()\n except AssertionError:\n return\n return flipped", "def get_verified_input(prompt, verify_by_func, msg_wrong=None):\n if msg_wrong is None:\n # stock error message\n msg_wrong = \"Invalid input.\"\n\n # raw\n answer = input_centered(prompt)\n # the answer must match conditions of verification\n while not verify_by_func(answer):\n sys_comment(msg_wrong, is_error=True)\n answer = input_centered(prompt)\n \n # at this point, the answer is verified to be valid\n return answer", "def make_assertions(input_pipe, other_pipes, output_pipe):\n assert isinstance(input_pipe, elements.InPypElement), 'Wrong input element type, want a InPypElement!'\n assert isinstance(output_pipe, elements.OutPypElement), 'Wrong output element type, want a OutPypElement!'\n for other_pipe in other_pipes:\n assert isinstance(other_pipe, elements.MidPypElement), 'Wrong middle element type, want a MidPypElement!'", "def test_task555(input_value, expected_value):\r\n assert list(algo.Task555.main_logic(input_value)) == expected_value", "def verify(case_name, test_input, test_target, test_func):\n actual_output = test_func(*test_input)\n print(case_name, test_input, ' target:', test_target,\n ' output:', actual_output)\n assert(test_target == actual_output)", "def verify(case_name, test_input, test_target, test_func):\n actual_output = test_func(*test_input)\n print(case_name, test_input, ' target:', test_target,\n ' output:', actual_output)\n assert(test_target == actual_output)", "def assert_equals(expected,received,message=None):\n if (expected != received):\n if message is None:\n message = 'assert_equals: expected %s but instead got %s' % 
(repr(expected),repr(received))\n quit_with_error(message)", "def test_always_succeed():\n assert True", "def assert_almost_equal(actual, desired, decimal=7):\n actual, desired = check_and_drop_units(actual, desired)\n numpy.testing.assert_almost_equal(actual, desired, decimal)", "def test_out_of_order(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"tan\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')", "def test_casting_without_iterable(test_fixture, test_input, expected):\n test_fixture.cast_prop = test_input\n assert test_input == test_fixture.cast_prop == expected\n assert type(test_fixture.cast_prop) == type(expected)", "def test_inputs_are_needed():\n with pytest.raises(TypeError):\n song_decoder()", "def test_task88d(input_value, expected_value):\r\n assert algo.Task88d.main_logic(input_value) == expected_value", "def test_expect_raising(self, exc_cls: t.Type[Exception]) -> None:\n exp_exc: t.Type[Exception] = exc_cls if exc_cls else RuntimeError\n kwargs = {\"exc_cls\": exc_cls} if exc_cls else {}\n input_val = 2\n msg = \"not what I expected\"\n\n with pytest.raises(exp_exc) as exc_info:\n Err(input_val).expect(msg, **kwargs)\n\n assert msg in str(exc_info.value)\n assert str(input_val) in str(exc_info.value)", "def test_validate_characters(self, input_string, expected_result):\n with patch('sys.exit', autospec=True) as m_sys_exit:\n # Call method under test\n test_result = validate_characters(input_string)\n\n # Assert expected result\n self.assertEqual(expected_result, test_result)" ]
[ "0.65425646", "0.64745665", "0.64722836", "0.644988", "0.64093584", "0.62563604", "0.6232764", "0.6228062", "0.61891055", "0.6131768", "0.61279905", "0.60960275", "0.6095586", "0.6073744", "0.6061325", "0.6057807", "0.6056131", "0.6024911", "0.60140353", "0.59895396", "0.59707934", "0.5945561", "0.59387034", "0.5938588", "0.58853185", "0.58779347", "0.5876655", "0.5873431", "0.5869287", "0.5868256", "0.58566743", "0.58403546", "0.577501", "0.5770783", "0.5766861", "0.57634246", "0.575159", "0.5737537", "0.57206964", "0.5714878", "0.5712819", "0.571122", "0.57014865", "0.5680025", "0.56748104", "0.5670145", "0.56687707", "0.5666172", "0.5656969", "0.56377673", "0.56368566", "0.5629889", "0.5617897", "0.56133485", "0.5605884", "0.5604428", "0.5600174", "0.55893093", "0.5584112", "0.55792284", "0.55626225", "0.55619776", "0.55485886", "0.5546445", "0.55397165", "0.5533613", "0.5526304", "0.5522811", "0.55164987", "0.5512901", "0.5510327", "0.55091757", "0.55091757", "0.55091757", "0.54952115", "0.5495066", "0.5489342", "0.5480594", "0.5476496", "0.5476398", "0.54760194", "0.547101", "0.5467745", "0.54652965", "0.5461695", "0.54527295", "0.5452559", "0.54517406", "0.5450578", "0.54487556", "0.5445043", "0.5445043", "0.544498", "0.54418695", "0.54410887", "0.54391783", "0.5436511", "0.54317313", "0.54305494", "0.543025", "0.54294294" ]
0.0
-1
Mock an error response when calling the OData API for user details.
def _mock_odata_api_for_error(self, odata_api_root_url, username):
    def callback(request, uri, headers):  # lint-amnesty, pylint: disable=unused-argument
        """ Return a 500 error when someone tries to call the URL. """
        headers['CorrelationId'] = 'aefd38b7-c92c-445a-8c7a-487a3f0c7a9d'
        headers['RequestNo'] = '[787177]'  # This is the format SAPSF returns for the transaction request number
        return 500, headers, 'Failure!'

    fields = ','.join(SapSuccessFactorsIdentityProvider.default_field_mapping.copy())
    url = '{root_url}User(userId=\'{user_id}\')?$select={fields}'.format(
        root_url=odata_api_root_url,
        user_id=username,
        fields=fields,
    )
    httpretty.register_uri(httpretty.GET, url, body=callback, content_type='application/json')
    return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_response_error(self):\n r = mock.Mock(spec=requests.Response)\n r.content = \"{'normal': 'resource'}\"\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n\n r.status_code = 404\n self.assertRaises(exceptions.HTTPNotFound, f.user_profile_get)\n\n r.status_code = 409\n self.assertRaises(exceptions.HTTPConflict, f.user_profile_get)\n\n r.status_code = 500\n self.assertRaises(exceptions.HTTPServerError, f.user_profile_get)\n\n r.status_code = 499\n self.assertRaises(exceptions.HTTPBadRequest, f.user_profile_get)", "def test_api_user_get(self):\n pass", "def test_response_auth(self):\n r = mock.Mock(spec=requests.Response)\n r.status_code = 401\n r.content = \"{'normal': 'resource'}\"\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n\n self.assertRaises(exceptions.HTTPUnauthorized, f.user_profile_get)\n\n r.status_code = 403\n self.assertRaises(exceptions.HTTPForbidden, f.user_profile_get)", "def test_004_get_user_not_found(self, mock_db_query):\n mock_db_query.get.return_value = None\n\n response = self.app.get('/v1/users/0', headers={'accept': 'application/json'})\n\n print(response.get_data().decode())\n\n self.assertEqual(response.status_code, 404)\n self.assertIn('User not found', response.get_data().decode())", "def test_broken_odata_details(self, mock_response):\n message = {\n \"error\": {\n \"code\": \"Conflict\",\n \"message\": \"The maximum number of Free ServerFarms allowed in a Subscription is 10.\",\n \"target\": None,\n \"details\": [\n {\"message\": \"The maximum number of Free ServerFarms allowed in a Subscription is 10.\"},\n {\"code\": \"Conflict\"},\n {\n \"errorentity\": {\n \"code\": \"Conflict\",\n \"message\": \"The maximum number of Free ServerFarms allowed in a Subscription is 10.\",\n \"extendedCode\": \"59301\",\n \"messageTemplate\": \"The maximum number of {0} ServerFarms allowed in a Subscription is {1}.\",\n \"parameters\": [\"Free\", \"10\"],\n \"innerErrors\": None,\n }\n },\n ],\n \"innererror\": None,\n }\n }\n exp = HttpResponseError(response=mock_response(json.dumps(message).encode(\"utf-8\")))\n assert exp.error.code == \"Conflict\"", "def test_search_user_fail_on_non_200_response(self) -> None:\n responses.add(responses.GET, local_app.config['SEARCHSERVICE_BASE'] + SEARCH_USER_ENDPOINT,\n json=self.mock_search_table_results, status=HTTPStatus.INTERNAL_SERVER_ERROR)\n\n with local_app.test_client() as test:\n response = test.get('/api/search/v0/user', query_string=dict(query='test', page_index='0'))\n self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)", "def test_get_user_fail_unauthorised():\n\n client = APIClient()\n\n response = client.get(reverse(\"user-detail\"), format=\"json\")\n assert response.status_code == status.HTTP_401_UNAUTHORIZED", "def test_003_get_user(self, mock_db_query):\n mock_db_query.get.return_value = seller1\n\n response = self.app.get('/v1/users/' + str(seller1.identity), headers={'accept': 'application/json'})\n\n print(response.get_data().decode())\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json['first_name'], 'Lisa')", "async def test_bad_retrieve_user_data(self, m):\n with self.assertRaises(aiohttp.web_exceptions.HTTPInternalServerError):\n await retrieve_user_data(\"bad_token\")", "def test_user_info_without_header(self, app, auth_user):\n data = UserInfo.random()\n us_info = app.user_info.add_user_info(data=data, user_id=auth_user.uuid,\n header=None, type_response=AuthInvalidResponse)\n 
assert us_info.status_code == 401, \"Check status code\"\n assert us_info.data.description == ResponseText.DESCRIPTION_AUTH_ERROR\n assert us_info.data.error == ResponseText.ERROR_AUTH_TEXT\n assert us_info.data.status_code == 401, \"Check status code\"", "def test_user_retrieve(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.get_token())\n response = self.client.get(reverse(\"account:user-profile\"))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('user').get('email'), \"[email protected]\")", "def test_unavailable_introspection_endpoint(self) -> None:\n request = Mock(args={})\n request.args[b\"access_token\"] = [b\"mockAccessToken\"]\n request.requestHeaders.getRawHeaders = mock_getRawHeaders()\n\n # The introspection endpoint is returning an error.\n self.http_client.request = AsyncMock(\n return_value=FakeResponse(code=500, body=b\"Internal Server Error\")\n )\n error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)\n self.assertEqual(error.value.code, 503)\n\n # The introspection endpoint request fails.\n self.http_client.request = AsyncMock(side_effect=Exception())\n error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)\n self.assertEqual(error.value.code, 503)\n\n # The introspection endpoint does not return a JSON object.\n self.http_client.request = AsyncMock(\n return_value=FakeResponse.json(\n code=200, payload=[\"this is an array\", \"not an object\"]\n )\n )\n error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)\n self.assertEqual(error.value.code, 503)\n\n # The introspection endpoint does not return valid JSON.\n self.http_client.request = AsyncMock(\n return_value=FakeResponse(code=200, body=b\"this is not valid JSON\")\n )\n error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)\n self.assertEqual(error.value.code, 503)", "def test_unknown_user(self):\n self.sign_in()\n response = self.client.get(reverse('backend:user_details', args=(0,)))\n self.assertEqual(response.status_code, 404)", "def test_call_httperror(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(500)\n with self.assertRaises(APIError):\n data = client.call(**self.build_parameters)", "def test_detail_requests_after_authentication(self):\n print(f'cls.user1={self.user1}')\n user_detail_url = reverse('user-detail',kwargs={'pk':1})\n self.token = Token.objects.create(user=self.user1)\n self.client.credentials(HTTP_AUTHORIZATION='Token '+self.token.key)\n response = self.client.get(user_detail_url)\n self.assertEqual(response.status_code,status.HTTP_200_OK)\n\n response_patch = self.client.patch(user_detail_url,{\n 'username': 'random_user', 'password': 'passwrodaosida123'\n })\n print(f'response_patch data={response_patch.data}')\n self.assertEqual(response_patch.data,\n {'id': 1, 'username': 'random_user', 'first_name': 'testuser', 'last_name': 'rajula', 'email': ''})\n self.assertEqual(response_patch.status_code,status.HTTP_200_OK)\n\n response = self.client.get(user_detail_url)\n self.assertEqual(response.status_code,status.HTTP_200_OK)\n self.assertEqual(response.data['username'],'random_user')", "def test_request_users_user(self):\n response = requests.get(self.url + '/users/John')\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNone(response.json())", "def test_sees_error_message_if_username_doesnt_exist(self):\n response = self.app.post(\n \"/api/users/login\",\n data=json.dumps(\n dict(\n 
email=USER_DATA[\"email\"] + \"x\",\n password=USER_DATA[\"credential1\"],\n )\n ),\n content_type=\"application/json\",\n follow_redirects=True,\n )\n res = response.data.decode(\"ASCII\")\n res = json.loads(res)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(\n res[\"message\"], \"Invalid email, Please try again\"\n )", "def test_show_failure(self, mock_get):\n mock_response = Mock(name='response')\n mock_response.json.side_effect = ValueError('No JSON object could be decoded')\n mock_get.return_value = mock_response\n\n with self.assertRaises(ValueError):\n # Call the method\n self.policies.show(id=333114)", "def test_no_user(self):\n self.request.user = None\n result = user_id_get_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def test_lti20_request_handler_bad_user(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n self.system.get_real_user = Mock(return_value=None)\r\n mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n self.assertEqual(response.status_code, 404)", "def test_get_sdb_id_invalid_response(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_sdb_id('some_id')", "def test_fetch_user(self):\n\n self.register_user()\n\n self.assertEqual(self.fetch_user_details().status_code, 200)\n\n self.assertTrue(self.fetch_user_details(\n ).json[\"data\"][0][\"username\"] == 'Bjorn')", "def test_get_single_user_is_missing(self):\n add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get('/users/999')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])", "def test_unauthorized_user(self):\n response_decoded_json = requests.post(URL_AUTH['url_login'], \n data=json.dumps(AUTH_PAYLOADS['payload_unauth']),\n headers=HEADER['header'])\n mes = response_decoded_json.json()\n assert 400 == response_decoded_json.status_code, \"You have BAD REQUEST\"\n assert \"User not found\" == mes, \"There is unexpected ability to login as unknown user\"", "def test_response_ok(self):\n r = mock.Mock(spec=requests.Response)\n r.status_code = 200\n r.content = '{\"normal\": \"resource\"}'\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n f.user_profile_get()\n\n r.status_code = 202\n f.user_profile_get()\n\n r.status_code = 204\n f.user_profile_get()", "def test_retrive_user(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data['email'], self.user.email)\n self.assertEqual(res.data['name'], self.user.name)\n self.assertNotIn('password', res.data)", "def test_login_success_no_userinfo(self):\n UserInfo.objects.filter(user=self.user).delete()\n resp = self.client.post(\n reverse('login'),\n json.dumps({\n \"username\": self.USERNAME,\n \"password\": self.PASSWORD,\n }),\n 
content_type=\"application/json\"\n )\n assert resp.status_code == 200, resp.content.decode('utf-8')\n json_data = json.loads(resp.content.decode('utf-8'))\n assert json_data['name'] == self.user.email", "def test_obtain_issues_response_error(self, mock_error, mock_url_read):\n mock_url_read.return_value = 'non-json'\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)\n self.assertEqual(mock_error.call_args[0][0], \"Error loading json: %s.\")\n self.assertIsInstance(mock_error.call_args[0][1], ValueError)", "def test_get_single_user_no_id(self):\n add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get('/users/blah')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])", "def test_nonexistent_user_login(self):\n\n fake_user = {\"email\": \"[email protected]\",\n \"password\": \"lolzIKid\"}\n\n res = self.client.post(\n \"/api/v2/auth/login\", data=json.dumps(fake_user), content_type=\"application/json\")\n result = json.loads(res.data)\n self.assertEqual(result[\"Error\"], \"User does not exist\")\n self.assertEqual(res.status_code, 404)", "def test_get_other_user(self):\n url = self.get_url(self.inactive_user.id)\n self.inactive_user.is_active = True\n self.inactive_user.save()\n\n with authenticated_user_api_client(self.client, self.active_user):\n expected_response_code = 200\n expected_fields = set(User.get_public_fields())\n response = self.client.get(url)\n\n response_fields = set(response.data.keys())\n self.assertEqual(response.status_code, expected_response_code)\n self.assertEqual(response_fields, expected_fields)", "def test_user_profile_view_success(self):\n params = {'pk': self.user.id}\n profile_response = self.client.get(reverse('api:users-detail', kwargs=params))\n self.assertTrue(profile_response.status_code == 200)\n user_data = profile_response.data\n self.assertTrue(user_data.get('username') == self.user.username)\n self.assertTrue(user_data.get('game_nickname') == self.user.game_nickname)\n self.assertTrue(user_data.get('email') == self.user.email)\n self.assertTrue(user_data.get('description') == self.user.description)\n self.assertTrue(user_data.get('gender') == self.user.gender)\n self.assertTrue(user_data.get('coins') == self.user.coins)", "def test_fail_patch_detail_user(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n user_data = {\"email\": \"[email protected]\", \"password\": \"testpassword\"}\n response = client.patch(reverse(\"user-detail\"), user_data, format=\"json\")\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED", "def test_getUser(self):\n\t\turl = \"/users/2/\"\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"id\"], 2)\n\t\tself.assertEqual(response.data[\"username\"], \"testUser2\")", "def test_user_get(self):\r\n expected_user = UserFactory.create()\r\n # Test GET all users\r\n res = self.app.get('/api/user')\r\n data = json.loads(res.data)\r\n user = data[0]\r\n assert len(data) == 1, data\r\n assert user['name'] == expected_user.name, data\r\n\r\n # The output should have a mime-type: application/json\r\n assert res.mimetype == 'application/json', res\r\n\r\n # Test GETting a specific user by ID\r\n res = 
self.app.get('/api/user/1')\r\n data = json.loads(res.data)\r\n user = data\r\n assert user['name'] == expected_user.name, data\r\n\r\n # Test a non-existant ID\r\n res = self.app.get('/api/user/3434209')\r\n err = json.loads(res.data)\r\n assert res.status_code == 404, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'user', err\r\n assert err['exception_cls'] == 'NotFound', err\r\n assert err['action'] == 'GET', err", "def test_bad_request_auth_user_no_details(self, zendesk_mock_class, datadog_mock):\r\n self._test_bad_request_omit_field(self._auth_user, self._auth_fields, \"details\", zendesk_mock_class, datadog_mock)\r\n self._test_bad_request_empty_field(self._auth_user, self._auth_fields, \"details\", zendesk_mock_class, datadog_mock)", "async def test_invalid_username(setup: SetupTest) -> None:\n user_info = GitHubUserInfo(\n name=\"A User\",\n username=\"invalid user\",\n uid=1000,\n email=\"[email protected]\",\n teams=[],\n )\n\n setup.set_github_token_response(\"some-code\", \"some-github-token\")\n r = await setup.client.get(\n \"/login\",\n headers={\"X-Auth-Request-Redirect\": \"https://example.com\"},\n allow_redirects=False,\n )\n assert r.status_code == 307\n url = urlparse(r.headers[\"Location\"])\n query = parse_qs(url.query)\n\n # Simulate the return from GitHub.\n setup.set_github_userinfo_response(\"some-github-token\", user_info)\n r = await setup.client.get(\n \"/login\",\n params={\"code\": \"some-code\", \"state\": query[\"state\"][0]},\n allow_redirects=False,\n )\n assert r.status_code == 403\n assert r.json() == {\n \"detail\": [\n {\n \"msg\": \"Invalid username: invalid user\",\n \"type\": \"permission_denied\",\n }\n ]\n }", "def test_user_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.user_header)\n self.assertEqual(response.status_code, 401)", "def test_invalid_token_admin(self):\n invalid_token = {\n \"Content-Type\" : \"application/json\",\n \"x-access-token\" : \"eyJ0eXAiOiJK6MTUyNjczNzQ5Nvm2LkbWLZF2RuD32FBvgG8KyM\"}\n response = self.app.get(\n '/api/v3/users',\n headers=invalid_token)\n self.assertEqual(response.status_code, 401)", "def test_client_get_organizations_error(mocker, error_message, capsys):\n mocker.patch(\"tracker_client.client.get_auth_token\")\n mocker.patch(\"tracker_client.client.create_client\")\n test_client = Client()\n test_client.execute_query = mocker.MagicMock(return_value=error_message)\n\n with pytest.raises(ValueError, match=r\"Unable to get your organizations\"):\n test_client.get_organizations()\n\n captured = capsys.readouterr()\n assert \"Server error:\" in captured.out", "def test_user_normal(self):\n expect = {\n \"username\": \"Bob\",\n \"profile-picture\": \"http://hello\",\n \"user-type\": \"user\",\n }\n with mock.patch(\"spotlogin_api.get_user_call\", self.mock_nuser):\n result = spotify_login.get_user(self.user[INPUT])\n self.assertEqual(result, expect)", "def test_access_control_is_superuser_as_superuser_raises_api_error(self):\n # Arrange\n mock_request = create_mock_request(user=self.superuser)\n exception = ApiError(\"\")\n\n # Act # Assert\n with self.assertRaises(ApiError):\n access_control_api.is_superuser(\n mock_function, exception, request=mock_request\n )", "def test_login_wrong_username(self):\n res = self.client.post('api/v2/auth/signup', json=self.user,\n headers={'Content-Type': 'application/json'})\n \n res_other = self.client.post('/api/v2/auth/login', json={\n 'username': 'MrMan', 'password': 'Aw3someSauce'}, headers={'Content-Type': 
'application/json'})\n data_other = res_other.get_json()\n\n self.assertEqual(res_other.status_code, 401)\n self.assertEqual(data_other['error'], 'User not found: Please register')", "def assertFailedRequest(self, response_data, expected_error):\r\n self.assertFalse(response_data['success'])\r\n self.assertEquals(expected_error, response_data['error'])\r\n self.assertFalse(self.user.email_user.called)", "def test_user_information_request(self):\n pass", "def test_retrieve_user_unautherized(self):\n res = self.client.get(ME_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def testGetUserWithoutData(self):\n self.store.commit()\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n deferred = self.facade.getUser(session, u'unknown')\n error = yield self.assertFailure(deferred, TNoSuchUser)\n self.assertEqual(u'unknown', error.name)", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_query_user_profile(user_profile, mock_requests):\n mock_requests(\n {'code': 200,\n 'data': {\n \"UserID\": 10001136,\n \"UserName\": \"rayvision\",\n \"platform\": 2,\n \"phone\": \"15945467254\",\n }})\n assert user_profile.query_user_profile()['UserID'] == 10001136\n assert user_profile.query_user_profile()['UserName'] == \"rayvision\"\n assert user_profile.query_user_profile()['platform'] == 2\n assert user_profile.query_user_profile()['phone'] == \"15945467254\"", "def test_get_single_user(self):\n user = add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get(f'/users/{user.id}')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue('created_at' in data['data'])\n self.assertIn('neilb', data['data']['username'])\n self.assertIn('[email protected]', data['data']['email'])\n self.assertIn('success', data['status'])", "def test_bad_request_anon_user_no_details(self, zendesk_mock_class, datadog_mock):\r\n self._test_bad_request_omit_field(self._anon_user, self._anon_fields, \"details\", zendesk_mock_class, datadog_mock)\r\n self._test_bad_request_empty_field(self._anon_user, self._anon_fields, \"details\", zendesk_mock_class, datadog_mock)", "def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_superuser(self):\n url = reverse('projectroles:api_user_current')\n response = self.request_knox(url)\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n expected = {\n 'username': self.user.username,\n 'name': self.user.name,\n 'email': self.user.email,\n 'is_superuser': True,\n 'sodar_uuid': str(self.user.sodar_uuid),\n }\n self.assertEqual(response_data, expected)", "def test_get(self):\n url = reverse('projectroles:api_user_current')\n response = self.request_knox(\n url, token=self.get_token(self.domain_user)\n )\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n expected = {\n 'username': self.domain_user.username,\n 'name': self.domain_user.name,\n 'email': self.domain_user.email,\n 'is_superuser': False,\n 'sodar_uuid': str(self.domain_user.sodar_uuid),\n }\n self.assertEqual(response_data, expected)", "def test_users_endpoint_response_with_code_status_200():\n response = api_helper.get_users()\n assert response.status_code == 200", "def test_get_user_info(self, db_mock):\n repo 
= Repository()\n\n db_instance = db_mock.return_value\n db_instance.get_user_info.return_value = None\n self.assertIsNone(repo.get_user_info(\"123\"))", "def test_user_profile_view_constraint(self):\n another_user = AnotherUserFactory()\n params = {'pk': another_user.id}\n profile_response = self.client.get(reverse('api:users-detail', kwargs=params))\n self.assertTrue(profile_response.status_code == 200)\n user_data = profile_response.data\n self.assertFalse(bool(user_data.get('coins')))\n self.assertFalse(user_data.get('email') == self.user.email)\n self.assertFalse(user_data.get('username') == self.user.username)\n self.assertFalse(user_data.get('description') == self.user.description)\n self.assertFalse(user_data.get('gender') == self.user.gender)\n self.assertFalse(user_data.get('birth_date') == self.user.birth_date)", "def mock_get_real_user(_anon_id):\r\n return self.user", "def test_create_user_errors(mocker, mock_method):\n mocker.patch(mock_method, side_effect=Exception(\"error\"))\n\n with pytest.raises(Exception):\n api.create_user(\n \"username\",\n \"email@localhost\",\n {\"name\": \"My Name\", \"image\": \"http://localhost/image.jpg\"},\n )\n\n assert User.objects.all().count() == 0\n assert Profile.objects.count() == 0", "def test_whoami_by_logged_in_user(self):\n user = factories.UserFactory(\n first_name=\"Jane\", last_name=\"Doe\", email=\"[email protected]\"\n )\n org_1 = factories.OrganizationFactory()\n org_access_1 = factories.OrganizationAccessFactory(\n user=user, organization=org_1\n )\n org_2 = factories.OrganizationFactory()\n org_access_2 = factories.OrganizationAccessFactory(\n user=user, organization=org_2\n )\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": str(user.username),\n }\n print(jwt_token.payload[\"user\"])\n\n with self.assertNumQueries(3):\n response = self.client.get(\n \"/api/users/whoami/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json()[\"date_joined\"],\n user.date_joined.isoformat()[:-6] + \"Z\", # NB: DRF literally does this\n )\n self.assertEqual(response.json()[\"email\"], \"[email protected]\")\n self.assertEqual(response.json()[\"first_name\"], \"Jane\")\n self.assertEqual(response.json()[\"id\"], str(user.id))\n self.assertEqual(response.json()[\"is_staff\"], False)\n self.assertEqual(response.json()[\"is_superuser\"], False)\n self.assertEqual(response.json()[\"last_name\"], \"Doe\")\n\n resp_accesses = response.json()[\"organization_accesses\"]\n resp_org_access_1 = (\n resp_accesses.pop(0)\n if resp_accesses[0][\"organization\"] == str(org_1.id)\n else resp_accesses.pop(1)\n )\n self.assertEqual(\n resp_org_access_1,\n {\n \"organization\": str(org_1.id),\n \"organization_name\": org_1.name,\n \"role\": org_access_1.role,\n \"user\": str(user.id),\n },\n )\n resp_org_access_2 = resp_accesses.pop(0)\n self.assertEqual(\n resp_org_access_2,\n {\n \"organization\": str(org_2.id),\n \"organization_name\": org_2.name,\n \"role\": org_access_2.role,\n \"user\": str(user.id),\n },\n )", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_wrong_email_on_login(self):\n self.response = self.client.post(\n 
\"/api/users/login/\",\n {\"user\": {\n \"email\": '[email protected]',\n \"password\": \"fakemail\",\n }\n },\n format=\"json\")\n self.assertEqual('A user with this email and password was not found.',\n self.response.json()['errors']['error'][0])", "def test_custom_validation_exceptions(self):\n expected = {\n \"errors\": [\n {\n \"status\": \"400\",\n \"source\": {\n \"pointer\": \"/data/attributes/email\",\n },\n \"detail\": \"Enter a valid email address.\",\n \"code\": \"invalid\",\n },\n {\n \"id\": \"armageddon101\",\n \"detail\": \"Hey! You need a last name!\",\n \"meta\": \"something\",\n \"source\": {\"pointer\": \"/data/attributes/lastName\"},\n },\n ]\n }\n response = self.client.post(\n \"/identities\",\n {\n \"data\": {\n \"type\": \"users\",\n \"attributes\": {\n \"email\": \"bar\",\n \"last_name\": \"alajflajaljalajlfjafljalj\",\n },\n }\n },\n )\n\n assert expected == response.json()", "def test_get_user(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n response = client.get(reverse(\"user-detail\"), format=\"json\")\n assert response.status_code == status.HTTP_200_OK\n assert json.dumps(response.data) == json.dumps(\n {\"id\": sample_user.pk, \"email\": sample_user.email}\n )", "def test_retrieve_profile(self):\n\n response = self.client.get(URL_ME)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Check that the user object returns as expected. There is no need\n # (and it is not secure) to return a password to client side.\n self.assertEqual(response.data, {\n 'name': self.user.name,\n 'email': self.user.email\n })", "def test_user_get_failure_using_basic_auth(self):\n # setup\n user = self.generate_username_password()\n resp = self.create_user(user)\n resp_body = resp.json()\n try:\n assert resp.status_code == 201\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body[\"username\"] == user[\"userName\"]\n assert resp_body[\"userID\"] != \"\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n uuid_ = str(uuid.uuid4())\n\n # test\n resp2 = self.get_user_basic_auth(uuid_, user)\n resp_body2 = resp2.json()\n assert resp2.status_code == 401\n assert resp2.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body2[\"code\"] == \"1207\"\n assert resp_body2[\"message\"] == \"User not found!\"\n\n # teardown:\n resp3 = self.delete_user_basic_auth(resp_body[\"userID\"], user)\n try:\n assert resp3.status_code == 204\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp3.request)\n self.pprint_response(resp3)", "def test_get_review_detail_fail(self):\n client = Client()\n response = client.get('/api/review/1/')\n self.assertEqual(response.status_code, 401)\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.get('/api/review/7/')\n self.assertEqual(response.status_code, 404)", "def test_get_specific_token_anonymous_user(self):\r\n\r\n res = self.app.get('/api/token/twitter')\r\n err = json.loads(res.data)\r\n\r\n assert res.status_code == 401, err\r\n assert err['status'] == 'failed', err\r\n assert err['status_code'] == 401, err\r\n assert err['exception_cls'] == 'Unauthorized', err\r\n assert err['target'] == 'token', err", "def test_call_unauthenticated(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(401)\n with self.assertRaises(APIError):\n data = 
client.call(**self.build_parameters)", "def test_create_user_endpoint_duplicate_user(self):\n kwargs = {'return_response_obj': True}\n response = self.test_create_user_endpoint(**kwargs)\n email = json.loads(response.text)[\"data\"][\"user\"][\"email\"]\n\n kwargs = {\"return_response_obj\": True, \"return_failure_response\": True, \"email\": email}\n response = self.test_create_user_endpoint(**kwargs)\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"", "def test_request_users_user_invalid(self):\n response = requests.get(self.url + '/users/invalid')\n\n self.assertEqual(response.status_code, 404)", "def test_unauthorised_access(mocker, expected_response, uclient):\n\n with pytest.raises(UnauthorisedResourceException):\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n uclient.get_balance_info(currency_id=1)", "def test_non_registered_user_login(self):\n with self.client:\n response = self.client.post(\n '/api/v1/auth/login',\n data=json.dumps({\n \"password\": \"qwerty@123\",\n \"username\": \"EdwinKyato\"\n }),\n content_type='application/json'\n )\n print(response.data)\n data = json.loads(response.data.decode())\n print(response.status_code)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 400)", "def test_retrieve_profile_success(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, {\n 'email': self.user.email,\n 'name': self.user.name\n })", "def test_retrieve_profile_success(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)\r\n self.assertEqual(res.data, {\r\n 'name': self.user.name,\r\n 'email': self.user.email,\r\n })", "def test_008_update_user_invalid_user_identity(self, mock_db_query, mock_db_add, mock_db_commit):\n mock_db_query.get.side_effect = [\n None,\n seller1.address\n ]\n\n standard_dict_update = standard_dict\n standard_dict_update['identity'] = 0\n standard_dict_update['first_name'] = \"Sally\"\n response = self.app.put('/v1/users/0', data=json.dumps(standard_dict_update),\n headers={'accept': 'application/json', 'content-type': 'application/json'})\n\n print(response.get_data().decode())\n\n self.assertEqual(response.status_code, 404)\n # Check we do not call the any database methods\n self.assertFalse(mock_db_add.called)\n self.assertFalse(mock_db_commit.called)\n self.assertIn('User not found', response.get_data().decode())", "def test_register_http_failure_in_odata(self):\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n\n odata_company_id = 'NCC1701D'\n odata_api_root_url = 'http://api.successfactors.com/odata/v2/'\n mocked_odata_api_url = self._mock_odata_api_for_error(odata_api_root_url, self.USER_USERNAME)\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': odata_api_root_url,\n 'odata_company_id': odata_company_id,\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n with LogCapture(level=logging.WARNING) as 
log_capture:\n self._test_register()\n logging_messages = str([log_msg.getMessage() for log_msg in log_capture.records]).replace('\\\\', '')\n assert odata_company_id in logging_messages\n assert mocked_odata_api_url in logging_messages\n assert self.USER_USERNAME in logging_messages\n assert 'SAPSuccessFactors' in logging_messages\n assert 'Error message' in logging_messages\n assert 'System message' in logging_messages\n assert 'Headers' in logging_messages", "def getUserInfo(request):\n try:\n user = UserSerializer(User.objects.get(id=request.data.get(\"id\")))\n return Response(user.data)\n \n\n except User.DoesNotExist:\n fail = {\n \"user\": \"user does not exist\"\n }\n return JsonResponse(fail)", "def test_location_detail_api_unauthorized (self):\n\n # get object\n location_api_1 = Location.objects.get(location_name='location_api_1')\n # get response\n response = self.client.get('/api/location/' + str(location_api_1.location_id) + '/')\n # compare\n self.assertEqual(response.status_code, 401)", "def test_login_with_wrong_username(self):\n reply = self.admin_register()\n user = dict(\n username='codjoe',\n password='Andela8'\n )\n resp = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n\n reply = json.loads(resp.data.decode())\n\n\n self.assertEqual(reply['message'], 'Wrong username!')\n self.assertEqual(resp.status_code, 400)", "def check_user_data_in_response(response_data):\n assert response_data[\"id\"] > 0\n assert response_data[\"name\"] == pytest.test_user.name\n assert response_data[\"email\"] == pytest.test_user.email\n assert response_data[\"gender\"] == pytest.test_user.gender\n assert response_data[\"status\"] == pytest.test_user.status", "def test_serialization(self):\n r = mock.Mock(spec=requests.Response)\n r.status_code = 200\n r.content = \"iyam not jason\"\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n self.assertRaises(exceptions.BadResponse, f.user_profile_get)", "def test_invalid_authentication(mock_get, api):\n\n\terror_message = \"\"\"{\"message\": \"Invalid authentication credentials\"}\"\"\"\n\tmock_get.return_value = Mock(text=error_message, status_code=403)\n\tmock_get.return_value.raise_for_status.side_effect = HTTPError\n\n\twith pytest.raises(top_stories.InvalidAuthentication):\n\t\tapi.get_stories(\"opinion\")", "def test_wrong_user_creation(self):\n with mock.patch.multiple(\n PersonClient,\n _post=mock.MagicMock(return_value={'persons': []})\n ):\n data = json.dumps([{'person': {\n 'name': 'Alan Turing',\n 'email': '[email protected]',\n 'context': None,\n 'external': True\n }}])\n response = self._post(data)\n self.assertStatus(response, 406)\n\n user = Person.query.filter_by(email='[email protected]').first()\n self.assertIsNone(user)", "def mock_api_stage_fail_login() -> str:\n return DUMMY_LOGIN_RESPONSE", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_bad_user(self):\r\n user = UserMgr.get(username=u'noexist')\r\n\r\n self.assertEqual(\r\n user,\r\n None,\r\n \"Should not find a non-existant user: \" + str(user))", "def test_fail_post_detail_user(sample_user):\n\n client = auth_client(APIClient(), sample_user.email, \"testpassword\")\n\n user_data = {\"email\": \"[email protected]\", \"password\": \"testpassword\"}\n response = client.post(reverse(\"user-detail\"), user_data, format=\"json\")\n assert response.status_code == 
status.HTTP_405_METHOD_NOT_ALLOWED", "def test_get_user_info_returns_user(self, db_mock):\n repo = Repository()\n db_instance = db_mock.return_value\n db_instance.get_user_info.return_value = (1, {\"name\": \"Kari\"})\n self.assertEquals(repo.get_user_info(\"123\"), (1, {\"name\": \"Kari\"}))", "def test_handle_unassign_user_lookup_error(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n elif args[0] == Team:\r\n return Team(\"GTID\", \"team-name\", \"display-name\")\r\n else:\r\n raise LookupError(\"user lookup error\")\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(self.testcommand.handle(\"project unassign ID\",\r\n user),\r\n (\"user lookup error\", 200))", "def test_authenticate_user_with_incorrect_username(self):\n data = {\n 'username': 'test_user_2',\n 'password': 'testpassword'\n }\n response = self.client.post(self.authenticate_url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['non_field_errors']), 1)", "def test_401_response(self):\n mock = Mock()\n mock.status_code = 401\n\n with self.assertRaises(AuthError):\n check_response(mock)", "def test_get_non_valid_token(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n res = self.app.get('/api/token/non-valid?api_key=' + user.api_key)\r\n error = json.loads(res.data)\r\n\r\n assert res.status_code == 404, error\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'GET', error\r\n assert error['target'] == 'token', error\r\n assert error['exception_cls'] == 'NotFound', error", "def test_known_user(self):\n self.sign_in()\n u = User.objects.create(first_name = \"David\",\n last_name = 'Smith',\n password='******',\n email='[email protected]',\n phone_number='012-345-6789')\n response = self.client.get(reverse('backend:user_details', args=(u.pk,)))\n self.assertEqual(response.status_code, 200)\n self.assertDictEqual(response.json(), u.json_detail())", "def test_retrieve_profile_success(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, {\n 'name': self.user.name,\n 'email': self.user.email\n })", "def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']", "def test_whoami_by_anonymous_user(self):\n response = self.client.get(\"/api/users/whoami/\")\n self.assertEqual(response.status_code, 401)", "def test_update_user_endpoint(self, **kwargs):\n print(\"Create a new user\")\n kwargs['return_response_obj'] = True\n response = self.test_create_user_endpoint(**kwargs)\n response = json.loads(response.text)\n\n print(\"Capture Authorization token\")\n token_type = response[\"data\"][\"token\"][\"token_type\"]\n access_token = response[\"data\"][\"token\"][\"access_token\"]\n headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"{0} {1}\".format(token_type, access_token)}\n kwargs['headers'] = headers\n\n print(\"Update the User\")\n custom_data = Workflows.update_user_details(test_args=self.test_args, **kwargs)\n kwargs[\"data\"] = {\"user\": custom_data}\n\n restapi = 
Rest(base_uri=self.global_config[\"base_url\"])\n response = restapi.put(relative_url=self.test_args[\"relative_url\"], **kwargs)\n\n if kwargs.get(\"return_response_obj\", False):\n return response\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"\n return None" ]
[ "0.68724155", "0.6683057", "0.6637301", "0.6578398", "0.6559852", "0.6474468", "0.6464813", "0.6425861", "0.638332", "0.63573205", "0.6336519", "0.6328632", "0.63244635", "0.62987155", "0.6267484", "0.62195075", "0.61980534", "0.6191649", "0.61694646", "0.614538", "0.61373806", "0.61093605", "0.60893244", "0.6086845", "0.60762435", "0.60732096", "0.6070228", "0.6063439", "0.6052495", "0.60500985", "0.60448635", "0.6042089", "0.6040222", "0.60338384", "0.6025418", "0.6017988", "0.60115457", "0.5998117", "0.59945905", "0.59941703", "0.5975242", "0.59731525", "0.59699804", "0.5962024", "0.59599286", "0.5940559", "0.5936024", "0.5935284", "0.59298134", "0.5926583", "0.5923022", "0.592082", "0.5901371", "0.5900314", "0.5895639", "0.58951515", "0.58917594", "0.58888006", "0.58882093", "0.58874714", "0.58858174", "0.588346", "0.588346", "0.58814967", "0.58754253", "0.587333", "0.5866431", "0.585418", "0.5842543", "0.5841694", "0.58386326", "0.58355945", "0.5829699", "0.5829584", "0.58252805", "0.5815661", "0.5814603", "0.58128357", "0.5811915", "0.581077", "0.5810035", "0.5804478", "0.58028823", "0.5802498", "0.5798951", "0.57982457", "0.5794626", "0.5794436", "0.5793099", "0.57911396", "0.5788622", "0.5785918", "0.57857245", "0.57832885", "0.57819855", "0.5780789", "0.5778647", "0.5778548", "0.5778416", "0.5773697" ]
0.73752326
0
Return a 500 error when someone tries to call the URL.
def callback(request, uri, headers):  # lint-amnesty, pylint: disable=unused-argument
    headers['CorrelationId'] = 'aefd38b7-c92c-445a-8c7a-487a3f0c7a9d'
    headers['RequestNo'] = '[787177]'  # This is the format SAPSF returns for the transaction request number
    return 500, headers, 'Failure!'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def internal_error():\n return HttpError(500)", "def view_500(request, url = None):\n res = render_to_response(\"500.html\", context_instance=RequestContext(request))\n res.status_code = 500\n return res", "def server_error(e):\n return 'Error while serving request', 500", "def internal_server_error(e):\n return render_template('500.html', error=repr(e)), 500", "def handler500(request):\n response = render_to_response('500.html', {}, RequestContext(request))\n response.status_code = 500\n return response", "def internal_server_error(e):\n return render_template(\"error/500.html\"), 500", "def handle_api_error(e):\n return f\"Failed to call Giphy API: {e}\", 500", "def error_bad_url(self):\n self._error(400, \"Bad Request\")", "def server_error(e):\n return render_template('500.html'), 500", "def internal_error(e):\n return render_template(\"errors/500.html\"), 500", "def server_error(request):\n response = render(request, '500.html')\n response.status_code = 500\n\n return response", "def handler500(request, *args, **argv):\n response = render_to_response('500.html', {})\n response.status_code = 500\n return response", "def internal_server_error(e):\n\n # Respons to api request\n if request.accept_mimetypes.accept_json and \\\n not request.accept_mimetypes.accept_html:\n resp = jsonify({'error': 'internal server error'})\n resp.status_code = 500\n return resp\n\n return render_template('errors/500.html'), 500", "def sample_500_response():\n response = requests.get(\"https://google.com\")\n response.status_code = 500\n return response", "def page_not_found(e):\n return render_template(\"500.html\"), 500", "def server_error(e):\n return 'Eftirfarandi villa kom upp: {}'.format(e), 500", "def flask_force_error():\n raise Exception('forced 500 error')", "def handler500(request):\n \n #Setting the variable and template page for the 500 error\n response = render_to_response('500.html', {}, context_instance=RequestContext(request))\n response.status_code = 500\n return response", "def err500():\n return render_template('404.html', year=datetime.now().year)", "def handle_500_error(_error):\n return make_response(jsonify(SERVER_ERROR), 500)", "def httperror( status_code=500, message=b'' ):", "def _raise_http_error(self, *args, **kwargs):", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return render_template('500.html', error=e), 500", "def nondefault_500_error(request, template_name='500nondefault.html'):\n t = loader.get_template(template_name) # You need to create a 500.html template.\n ltype,lvalue,ltraceback = sys.exc_info()\n sys.exc_clear() #for fun, and to point out I only -think- this hasn't happened at \n #this point in the process already\n return http.HttpResponseServerError(t.render(Context({'type':ltype,'value':lvalue,'traceback':ltraceback})))", "def error_500(error):\n # Delete the error variable as unused\n del error\n # Render 404 page\n return 
render_template('500.html'), 500", "def internal_server_error(error):\n return render_template('error.html', error_msg=\"500 Internal Server error\", pagetitle=\"500 Internal Server error\"), 500", "def url_error():\n try:\n from urllib.error import URLError\n except ImportError:\n from urllib2 import URLError # suppress(import-error)\n\n return URLError", "def handle_500(e):\n try:\n raise e\n except:\n return traceback.format_exc(), 500", "def badRequest(message):\r\n raise Http404(message)", "def error_500_handler(error):\n new_issue = 'https://github.com/andresriancho/w3af/issues/new'\n\n try:\n # Extract the filename and line number where the exception was raised\n exc_type, exc_value, exc_traceback = sys.exc_info()\n filepath = traceback.extract_tb(exc_traceback)[-1][0]\n filename = basename(filepath)\n lineno, function_name = get_last_call_info(exc_traceback)\n\n response = jsonify({'code': 500,\n 'message': str(error),\n 'filename': filename,\n 'line_number': lineno,\n 'function_name': function_name,\n 'exception_type': error.__class__.__name__,\n 'please': new_issue})\n except Exception, e:\n # I don't want to fail in the exception handler\n response = jsonify({'code': 500,\n 'exception': str(error),\n 'handler_exception': str(e),\n 'please': new_issue})\n\n response.status_code = 500\n return response", "def internal_error_handler(error):\r\n return render_template('error.500.html')", "def not_found():\n return HttpError(404)", "def internal_server_error(error): # pylint: disable=unused-argument\n response = jsonify(\n {\n \"success\": False,\n \"error_code\": 500,\n \"message\": \"Internal Server Error\",\n }\n )\n return response, 500", "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = traceback.format_exc()\n logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return \"Internal Server Error\", 500", "def bad_request():\n return HttpError(400)", "def internal_server_error(err):\n return error_formatter(code='500_01', details=err)", "def raise_500():\n raise ValueError('Foo!')", "def raise_on_error(self):\n if not self._status.success:\n cls = UrlApi.InfraHTTPError if self._infra_step else UrlApi.HTTPError\n raise cls('HTTP status (%d)' % (self.status_code,), self)", "def bad_request(e):\n return render_template(\"400.html\", page_title=400)", "def server_error(request, template_name='500.html'):\n # don't risk running context processors\n context = dict(settings.TEMPLATE_CONSTANTS)\n context['MEDIA_URL'] = settings.MEDIA_URL\n context['STATIC_URL'] = settings.STATIC_URL\n return render_to_response(template_name, context)", "def _raise_performing_request_error(self, *args, **kwargs):", "def _raise_if_error(response):\n if response.status_code != 200:\n raise SimpleHTTPException(response)", "def page_not_found(er):\n return render_template('errors.html'), 500", "def server_error(request):\n return defaults.server_error(request, template_name=get_template_name(request, \"500.html\"))", "def internal_error(exception):\n app.logger.error(exception)\n return flask.make_response('server error', 500)", "def error404(ex):\n # logger.error(ex)\n return \"error 404 : {0}\".format(ex.body)", "def error_404(error):\n return 'Bummer, there is nothing at this URL.'", "def internal_error(error):\n return jsonify({'error': \"Internal Server Error. 
\"\n \"Bitte die Logdatei für Details anschauen.\"}), 500", "def _request(self, url):\n response = requests.get(url, headers=self.header)\n\n if str(response.status_code).startswith('2'):\n return response\n\n raise Exception(\"URI request returned an error. Error Code \" + str(response.status_code))", "def renderHTTP_exception(request, failure):", "def custom_500(request, exception=None):\n return render(request, \"500.html\", {\"exception\": exception})", "def handler500(request):\n import sys,traceback\n from django.template import Context, loader\n from django.http import HttpResponseServerError\n\n t = loader.get_template('500.html')\n typo, value, tb = sys.exc_info()\n\n return HttpResponseServerError(t.render(Context({\n 'exception_value': value,\n 'DEBUG': settings.TEMPLATE_DEBUG,\n 'value':typo,\n 'tb':traceback.format_exception(typo, value, tb)})))", "def not_found():\n raise cherrypy.HTTPError(404, \"Not Found.\")", "def get_500_response(message):\n headers = HTTPHeaders.HTTPHeaders()\n add_default_headers(headers)\n headers[\"Connection\"] = \"close\"\n headers[\"Content-Length\"] = str(len(message))\n headers[\"Content-Type\"] = \"text/plain\"\n\n return HTTPResponse.HTTPResponse(version=1.0, status_code=500, phrase=\"Internal Error\",\n headers=headers, data=message)", "def handle_uncaught_error(e):\n status_code = 500\n\n result = {\n \"error_message\": \"Unknown or unexpected error.\",\n \"error_code\": \"INTERNAL_SERVER_ERROR\"\n }\n return jsonify(result), status_code", "def error(\n status=500,\n message=\"Internal Server Error\"\n):\n return make_response(\n jsonify(error=message),\n status,\n )", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, nothing at this URL.', 404", "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = format_exc()\n app.logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return jsonify(message=\"Internal Server Error\"), 500", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def page_not_found(e):\n return 'Sorry, Nothing at this URL.', 404", "def server_error(err):\n log.error(err)\n return err.msg, 500", "def error_not_found(error):\n return 'No page here, dood. 404!', 404", "def error_handler(source, prod, HEADERS):\n\n try:\n req = requests.get(source, params=prod, headers=HEADERS)\n except Timeout as e:\n print(\"\\nThe website took too long to respond. Please try after sometime.\\n\")\n sys.exit(1)\n except ConnectionError as e:\n print(\"\\nYou do not have a descent internet connection. 
Please check your Internet Connection and try again later.\\n\")\n sys.exit(1)\n except TooManyRedirects as e:\n print(\"\\nYour request exceeded the configured number of maximum redirections. Please try after sometime.\\n\")\n sys.exit(1)\n except Exception as e:\n print(\"\\nRequest souldn't be completed. Please try after sometime.\\n\")\n sys.exit(1)\n\n return req", "def server_fault(e):\n return \"Something went wrong, and it is our fault. Try reloading the page.\"", "def error(self, code, message = ''):\n self.response.set_status(404)\n raise Exception(message)", "def response_500(description=None):\n resp_def = dict(util.RESPONSE_404)\n if description is not None:\n resp_def['description'] = description\n return response(403, resp_def)", "def _handle_api_error(ex):\n if request.path.startswith('/api/'):\n message, detail = str(ex).split(\": \")\n return jsonify(message=message, detail=detail), ex.code\n else:\n return ex", "def call(url):\n result = requests.get(url)\n if 300 <= result.status_code < 400:\n raise TemporaryException\n if result.status_code == 429:\n raise ApiCountZeroException\n if 400 <= result.status_code < 600:\n raise PermanentException\n return result", "def resp500(msg):\n app.logger.error(msg)\n return Resp({'message':msg, 'success':False}, status=500)", "def _send_internal_server_error(self):\n template_filename = self._get_config_template('500')\n text = read_template(\n template_filename,\n title='%s - Internal Error' % SERVER_NAME,\n header='Internal error')\n if not text:\n # fallback to hard-coded template\n text = TEMPLATE_500\n self._send_head(text, 500)\n if not self._header_only:\n self.wfile.write(text)", "def server_error(error=None):\n return jsonify({\n 'Error': 'Check if the request causes a server error'\n }), 500", "def internal_error(error):\n current_app.logger.info(error)\n db.session.rollback()\n return error, \"500\"", "def bad_request(message):\n return error_response(400, message)", "def raise_on_error(request: requests.Response) -> None:\n if request.status_code >= 400:\n json_res = request.json()\n raise requests.HTTPError(json_res)\n\n return None", "def handler404(request):\n \n #Setting the variable and template page for the 500 error\n response = render_to_response('404.html', {}, context_instance=RequestContext(request))\n response.status_code = 404\n return response", "def handle_error(self, e):\n code = getattr(e, 'code', 500) # Gets code or defaults to 500\n if code == 404:\n return self.make_response({\n 'message': 'not-found',\n 'code': 404\n }, 404)\n return super(MyApi, self).handle_error(e) # handle others the default way", "def not_found_error(error):\n current_app.logger.info(error)\n return error, \"404\"", "def page_not_found(er): \n return render_template('errors.html'), 400", "def error_404(error):\n return '404 Error'", "def server_error(request, template_name='500.html', data=None):\n\n error_id = str(uuid.uuid4())\n error_message = 'Error ID: %s' % error_id\n if hasattr(request, 'session'):\n error_message = '%s. URLs leading up to this error: %s' % (\n error_message, url_history(request.session))\n logger.error(error_message)\n\n t = loader.get_template(template_name)\n\n if data:\n data = dict(data)\n else:\n data = dict()\n data['error_id'] = error_id\n result = http.HttpResponseServerError(t.render(Context(data)))\n return result", "def handle_failure_request(self) -> HttpResponse:\n return HttpResponseNotFound()" ]
[ "0.7490835", "0.74022293", "0.70942414", "0.70713156", "0.7046498", "0.7032829", "0.70033985", "0.69633645", "0.6963141", "0.6925367", "0.6915124", "0.69088185", "0.6898223", "0.68840873", "0.6861838", "0.68376124", "0.6830139", "0.68263555", "0.6814626", "0.67630464", "0.6759206", "0.673339", "0.6721863", "0.6721863", "0.6721863", "0.6721863", "0.6721863", "0.6721863", "0.6721863", "0.6721863", "0.6674612", "0.6625911", "0.6624423", "0.6611116", "0.66063064", "0.65936804", "0.6591452", "0.657713", "0.6570677", "0.6547706", "0.6544908", "0.65416324", "0.6520291", "0.650345", "0.64946634", "0.64893085", "0.6477793", "0.64735055", "0.6465568", "0.64648896", "0.6437387", "0.64373183", "0.6436901", "0.6415789", "0.6409551", "0.6396416", "0.63484347", "0.63421947", "0.6324419", "0.6314921", "0.6310338", "0.63098353", "0.6301263", "0.6282545", "0.6275548", "0.6275548", "0.6275548", "0.6275548", "0.6275548", "0.6275548", "0.6275548", "0.6256568", "0.6249031", "0.6249031", "0.6249031", "0.6249031", "0.6249031", "0.6249031", "0.6249031", "0.6249031", "0.6245463", "0.62300444", "0.62283677", "0.62187916", "0.6210729", "0.6201646", "0.6201114", "0.6196874", "0.61962646", "0.6178434", "0.6174765", "0.6172849", "0.61510277", "0.61483777", "0.61474675", "0.61430275", "0.6138609", "0.61365646", "0.61346114", "0.6131425", "0.61247426" ]
0.0
-1
Configure the provider such that it doesn't have enough details to contact the SAP SuccessFactors API, and test that it falls back to the data it receives from the SAML assertion.
def test_register_insufficient_sapsf_metadata(self):
    self._configure_testshib_provider(
        identity_provider_type='sap_success_factors',
        metadata_source=TESTSHIB_METADATA_URL,
        other_settings='{"key_i_dont_need":"value_i_also_dont_need"}',
    )
    # Because we're getting details from the assertion, fall back to the initial set of details.
    self.USER_EMAIL = "[email protected]"
    self.USER_NAME = "Me Myself And I"
    self.USER_USERNAME = "myself"
    self._test_register()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def setUp(self):\n super().setUp()\n\n # Mock the call to the SAP SuccessFactors assertion endpoint\n SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp'\n\n def assertion_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')\n\n httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)\n\n SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp'\n\n def bad_callback(_request, _uri, headers):\n \"\"\"\n Return a 404 error when someone tries to call the URL.\n \"\"\"\n return (404, headers, 'NOT AN ASSERTION')\n\n httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', 
body=bad_callback)\n\n # Mock the call to the SAP SuccessFactors token endpoint\n SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'\n\n def token_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'assertion=fake_saml_assertion' in _request.body\n assert b'company_id=NCC1701D' in _request.body\n assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, '{\"access_token\": \"faketoken\"}')\n\n httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)\n\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'email': '[email protected]',\n 'country': 'Australia',\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)", "def test_register_http_failure(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def test_claims_supported_not_set(self):\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], [])", "def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"", "def test_non_proctortrack_provider(self, proctoring_provider, escalation_email):\n self.setup_course_with_proctoring_backend(proctoring_provider, escalation_email)\n\n self.instructor.is_staff = True\n self.instructor.save()\n self._assert_escalation_email_available(False)", "def test_register_http_failure_in_odata(self):\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email 
protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n\n odata_company_id = 'NCC1701D'\n odata_api_root_url = 'http://api.successfactors.com/odata/v2/'\n mocked_odata_api_url = self._mock_odata_api_for_error(odata_api_root_url, self.USER_USERNAME)\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': odata_api_root_url,\n 'odata_company_id': odata_company_id,\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n with LogCapture(level=logging.WARNING) as log_capture:\n self._test_register()\n logging_messages = str([log_msg.getMessage() for log_msg in log_capture.records]).replace('\\\\', '')\n assert odata_company_id in logging_messages\n assert mocked_odata_api_url in logging_messages\n assert self.USER_USERNAME in logging_messages\n assert 'SAPSuccessFactors' in logging_messages\n assert 'Error message' in logging_messages\n assert 'System message' in logging_messages\n assert 'Headers' in logging_messages", "def test_ssl_default(self):\n e = ErrataConnector()\n assert e.ssl_verify", "def setup_provider(self):\n pass", "def test_new_token_insuficient_config(db, mocker):\n settings.AUTH0_CLIENT = None\n\n mock = mocker.patch(\"creator.authentication.requests.post\")\n\n assert get_token(\"my_aud\") is None\n assert mock.call_count == 0", "def test_register_sapsf_metadata_present_empty_value_override(self):\n\n value_map = {'country': {}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "async def test_config_entry_no_authentication(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n aioclient_mock.post(\n \"http://example.local:80/1234/JQ?Parameter=6224,6225,6226\",\n exc=aiohttp.ClientError,\n )\n\n entry = await init_integration_without_auth(hass, aioclient_mock)\n assert entry.state is ConfigEntryState.SETUP_RETRY", "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_auth_failure_xml(self):\n\n config = get_config()\n\n if config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey') + \"1234\",\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp\n )\n\n self.assertEqual(\n response,\n None,\n \"Authentication did not return 'None', but %s instead.\" % (\n response\n )\n )", "def setUp(self):\n self.ol = OneloginAWS(\n _MockSection(\n base_uri=\"https://api.us.onelogin.com/\",\n client_id='mock-id',\n client_secret='mock-secret',\n aws_app_id='mock-app-id',\n subdomain='example',\n can_save_password=False,\n username='mock-username',\n duration_seconds=2600,\n auto_determine_ip_address=False,\n ),\n )\n\n self.ol.password = \"mock-password\"\n\n self.get_saml_assertion_mock = MagicMock(return_value=Namespace(\n mfa=Namespace(\n devices=[Namespace(type='mock1', id='mock-id-1'), ],\n state_token='mock-token'\n ),\n ))\n self.get_saml_assertion_verifying_mock = MagicMock(\n return_value='mock-saml-response'\n )\n self.ol.ol_client = Namespace(\n get_saml_assertion=self.get_saml_assertion_mock,\n get_saml_assertion_verifying=(\n self.get_saml_assertion_verifying_mock\n ),\n error=None,\n )", "def test_sanity(self, mock_provider):\n p = mock_provider()\n assert p.metadata == {'base_url': 'https://api.mock.com',\n 'provider_name': 'mock_provider',\n 'site_url': 'https://www.mock.com'}\n assert p.arguments == {\n 'not_required': {\n 'oneOf': [\n {'items': {'type': 'string'}, 'minItems': 1, 'type': 'array', 'uniqueItems': True},\n {'type': 'string'}\n ]\n },\n 'required': {'type': 'string'},\n 'message': {'type': 'string'},\n 'option_with_default': {'type': 'string'}\n }\n\n assert p.required == ['required']\n rsp = p.notify(**self.valid_data)\n assert isinstance(rsp, Response)\n assert not rsp.errors\n assert rsp.raise_on_errors() is None\n assert repr(rsp) == '<Response,provider=Mock_provider,status=success>'\n assert repr(p) == '<Provider:[Mock_provider]>'", "def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_ssl_default(self):\n assert 
security.security_settings.ssl_verify()", "async def test_config_entry_not_ready(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n aioclient_mock.post(\n \"http://example.local:80/1234/JQ?Parameter=6224,6225,6226\",\n exc=aiohttp.ClientError,\n )\n\n entry = await init_integration(hass, aioclient_mock)\n assert entry.state is ConfigEntryState.SETUP_RETRY", "def assertion_callback(_request, _uri, headers):\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')", "def test_02_sso_read(self):\n self.direct_login_user_1()\n new_application = self.my_context_dict[\"new_application\"]\n previous_conf = self.api_handler.conf\n new_conf = previous_conf() ## here we instanciate the previous conf so that we can modify some values without changing the class values\n new_conf.ICEBERG_APPLICATION_SECRET_KEY = str(new_application.fetch_secret_key())\n new_conf.ICEBERG_APPLICATION_NAMESPACE = str(new_application.namespace)\n\n self.api_handler = IcebergAPI(conf = new_conf)\n self.login_user_1()\n application = self.api_handler.Application.find(new_application.id)\n self.assertFalse(application==None)\n\n # self.login_user_2()\n # try:\n # application = self.api_handler.Application.find(new_application.id)\n # except IcebergClientUnauthorizedError:\n # ## should raise this exception\n # pass\n # else:\n # raise Exception(\"Application should not be accessible by user_2\")\n\n self.api_handler.conf = previous_conf", "def testInitEmpty():\n conf = naiveConf.NaiveConf()\n with pytest.raises(KeyError):\n print conf.x\n conf.x = 5\n assert conf.x == 5", "def test_attribute_defaults(self):\n creds = NokiaCredentials()\n self.assertEqual(creds.access_token, None)\n self.assertEqual(creds.token_expiry, None)\n self.assertEqual(creds.token_type, None)\n self.assertEqual(creds.token_expiry, None)\n self.assertEqual(creds.user_id, None)\n self.assertEqual(creds.client_id, None)\n self.assertEqual(creds.consumer_secret, None)", "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def test_register_sapsf_with_value_default(self):\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,country,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'country': 'Australia'\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)\n\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings),\n default_email='[email protected]'\n )\n self.USER_EMAIL = 
'[email protected]'\n self._test_register()", "def check_configuration(self):\n self.ensure_one()\n getattr(self, '%s_check_configuration' % self.provider, lambda: None)()", "def test_provider(self):\n msg = 'Wrong number of processing algorithm loaded.'\n self.assertEqual(len(self.provider.alglist), 6, msg)\n\n msg = 'InaSAFE should be activated by default in Processing.'\n self.assertEqual(self.provider.activate, True, msg)\n\n msg = 'Wrong processing provide.'\n for algorithm in self.provider.alglist:\n self.assertEqual(algorithm.provider, self.provider, msg)", "def test_get_provider_traits_error(self, log_mock):\n uuid = uuids.compute_node\n resp_mock = mock.Mock(headers={\n 'x-openstack-request-id': uuids.request_id})\n self.ks_adap_mock.get.return_value = resp_mock\n\n for status_code in (400, 404, 503):\n resp_mock.status_code = status_code\n self.assertRaises(\n exception.ResourceProviderTraitRetrievalFailed,\n self.client.get_provider_traits, self.context, uuid)\n\n expected_url = '/resource_providers/' + uuid + '/traits'\n self.ks_adap_mock.get.assert_called_once_with(\n expected_url,\n global_request_id=self.context.global_id,\n **self.trait_api_kwargs)\n self.assertTrue(log_mock.called)\n self.assertEqual(uuids.request_id,\n log_mock.call_args[0][1]['placement_req_id'])\n self.ks_adap_mock.get.reset_mock()\n log_mock.reset_mock()", "def test_error_on_invalid_provider(self):\n self.ocp_data.update({\"source_uuids\": [\"1dd7204c-72c4-4ec4-95bc-d5c447688b27\"]})\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def test_required_config_none(self):\n base_config = BaseConfig()\n setattr(base_config, 'required_config', ['TEST_CONF'])\n setattr(base_config, 'TEST_CONF', None)\n\n self.assertRaises(Exception, base_config.check_required_config)", "def test_adapter_required(self):\n from fixtures.test_adapter import TestAdapter\n from pyperry import errors\n class Test(pyperry.Base):\n def _config(cls):\n cls.configure('read', poop='smells')\n\n self.assertRaises(errors.ConfigurationError, Test.adapter, 'read')", "def test_no_adapter_opts(self):\n self.oslo_config_dict['heat'] = None\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): no such option\",\n )", "def setUp(self, plurals={}):\r\n super(CLITestV20Base, self).setUp()\r\n client.Client.EXTED_PLURALS.update(constants.PLURALS)\r\n client.Client.EXTED_PLURALS.update(plurals)\r\n self.metadata = {'plurals': client.Client.EXTED_PLURALS,\r\n 'xmlns': constants.XML_NS_V20,\r\n constants.EXT_NS: {'prefix':\r\n 'http://xxxx.yy.com'}}\r\n self.mox = mox.Mox()\r\n self.endurl = ENDURL\r\n self.fake_stdout = FakeStdout()\r\n self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.fake_stdout))\r\n self.useFixture(fixtures.MonkeyPatch(\r\n 'neutronclient.neutron.v2_0.find_resourceid_by_name_or_id',\r\n self._find_resourceid))\r\n self.useFixture(fixtures.MonkeyPatch(\r\n 'neutronclient.neutron.v2_0.find_resourceid_by_id',\r\n self._find_resourceid))\r\n self.useFixture(fixtures.MonkeyPatch(\r\n 'neutronclient.v2_0.client.Client.get_attr_metadata',\r\n self._get_attr_metadata))\r\n self.client = client.Client(token=TOKEN, endpoint_url=self.endurl)", "def test_init(self):\n with self.assertRaises(ValueError):\n 
TraxionPay(api_key=self.api_key)", "def test_client_verification_retrieve(self):\n pass", "def test_data(self):\n provider = CreditProviderFactory(active=False)\n serializer = serializers.CreditProviderSerializer(provider)\n expected = {\n 'id': provider.provider_id,\n 'display_name': provider.display_name,\n 'url': provider.provider_url,\n 'status_url': provider.provider_status_url,\n 'description': provider.provider_description,\n 'enable_integration': provider.enable_integration,\n 'fulfillment_instructions': provider.fulfillment_instructions,\n 'thumbnail_url': provider.thumbnail_url,\n }\n self.assertDictEqual(serializer.data, expected)", "def test_checkmarx_init_no_ssl(self, mock_url_read, mock_create_unverified_context):\n # pylint: disable=protected-access\n delattr(ssl, '_create_unverified_context')\n mock_url_read.return_value = '{\"access_token\": \"abc123\"}'\n marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec\n\n self.assertIsNotNone(marx)\n self.assertFalse(hasattr(ssl, '_create_unverified_context'))\n self.assertTrue(hasattr(ssl, '_create_default_https_context'))\n mock_create_unverified_context.assert_not_called()", "def test_verification_failed(self):\n pass", "def test_metadata_saml_not_authorized():\n\n responses.add(\n responses.GET,\n f\"{SERVICE_URL}/$metadata\",\n content_type='text/html; charset=utf-8',\n status=200)\n\n with pytest.raises(HttpError) as e_info:\n pyodata.Client(SERVICE_URL, requests)\n\n assert str(e_info.value).startswith('Metadata request did not return XML, MIME type:')", "def test_init_correct(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n self.assertEqual(ap.ip, '2.2.2.2')", "def test_claims_supported_set(self):\n expected_claims = ['openid', 'email']\n\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], expected_claims)", "async def test_setup_fail_on_ssl_erros(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n respx.get(\"https://localhost\").mock(side_effect=ssl.SSLError(\"ssl error\"))\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"https://localhost\",\n \"method\": \"GET\",\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 0\n assert \"ssl error\" in caplog.text", "def __init__(self, base_url):\n module_base = \"%s/%s\" % (base_url, Saml2BackendPlugin.provider)\n sp_config = {\n \"entityid\": \"%s/proxy_sp.xml\" % module_base,\n \"service\": {\n \"sp\": {\n \"allow_unsolicited\": True,\n \"endpoints\": {\n \"assertion_consumer_service\": [\n (\"%s/acs/post\" % module_base, BINDING_HTTP_POST),\n (\"%s/acs/redirect\" % module_base, BINDING_HTTP_REDIRECT)\n ],\n }\n }\n },\n \"key_file\": TestConfiguration.get_instance().backend_key.name,\n \"cert_file\": TestConfiguration.get_instance().backend_cert.name,\n \"metadata\": {\n \"local\": TestConfiguration.get_instance().fake_idp_metadata,\n },\n\n \"xmlsec_binary\": TestConfiguration.get_instance().xmlsec_path,\n }\n config = {\"config\": sp_config,\n \"idp_entity_id\": 
\"https://example.com/unittest_idp.xml\",\n \"state_id\": \"saml_backend_test_id\"\n }\n\n super(Saml2BackendPlugin, self).__init__(SamlBackend, Saml2BackendPlugin.provider, config)", "def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def test_patch_hyperflex_ucsm_config_policy(self):\n pass", "async def test_config_not_ready(hass):\n entry = MockConfigEntry(\n domain=DOMAIN,\n title=\"Home\",\n unique_id=\"55.55-122.12\",\n data={\n \"api_key\": \"foo\",\n \"latitude\": 55.55,\n \"longitude\": 122.12,\n \"name\": \"Home\",\n },\n )\n\n with patch(\"airly._private._RequestsHandler.get\", side_effect=ConnectionError()):\n entry.add_to_hass(hass)\n await hass.config_entries.async_setup(entry.entry_id)\n assert entry.state == ENTRY_STATE_SETUP_RETRY", "def test_init_NoConfigNoClusterFound(self):\n self.flags(cluster_name='', group='powervm')\n self.apt.read.return_value = self._bld_resp(status=204)\n self.assertRaises(npvmex.NoConfigNoClusterFound, self._get_ssp_stor)", "def test_proctortrack_provider_with_email(self):\n self.setup_course_with_proctoring_backend('proctortrack', '[email protected]')\n\n self.instructor.is_staff = True\n self.instructor.save()\n self._assert_escalation_email_available(True)", "def setUp(self):\n super().setUp()\n self.expected_entities = [\n {\n \"key\": self.API1,\n \"name\": self.API1,\n \"sample_count\": 123,\n \"error_count\": 2,\n \"error_percentage\": round((2.0 / 123) * 100, 1),\n \"mean_response_time\": 110.0,\n \"min_response_time\": 50.0,\n \"max_response_time\": 250.0,\n \"percentile_50_response_time\": 100.0,\n \"percentile_75_response_time\": 115.0,\n \"percentile_95_response_time\": 135.0,\n \"percentile_99_response_time\": 195.0,\n },\n {\n \"key\": self.API2,\n \"name\": self.API2,\n \"sample_count\": 125,\n \"error_count\": 4,\n \"error_percentage\": round((4.0 / 125) * 100, 1),\n \"mean_response_time\": 110.6,\n \"min_response_time\": 40.0,\n \"max_response_time\": 2500.0,\n \"percentile_50_response_time\": 90.0,\n \"percentile_75_response_time\": 120.0,\n \"percentile_95_response_time\": 150.0,\n \"percentile_99_response_time\": 190.0,\n },\n ]\n self.set_source_parameter(\"target_response_time\", \"10\")", "def setUp(self):\n\n self.ach_model = FundingSources.get_ach_model()\n self.verify = self.get_basic_verification()", "def test_configure():\n\n configs = DTO()\n configs.update(fake_name='fake_name', fake_id='fake_id',\n fake_number=33, fake_value='fake_value')\n application_services.configure(configs)\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in\n ['FAKE_NAME', 'FAKE_ID', 'FAKE_NUMBER', 'FAKE_VALUE'])\n\n assert not any(name in app_configs for name in\n ['fake_name', 'fake_id', 'fake_number', 'fake_value'])", "def test_alpn_call_failure(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(ValueError):\n context.set_alpn_protos([])", "def test_client_not_confirmed(self):\n\t\tself.signup()\n\n\t\tresponse = self.rest_client.get(reverse('provider_setup_intro'))\n\t\tself.assertEqual(response.status_code, 302)", "def test_read_env_config4(config, environment_vars_set):\n del 
os.environ[\"YESSSSMS_PROVIDER\"]\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"yesss\"", "def test_get_provider_traits_placement_comm_error(self):\n uuid = uuids.compute_node\n self.ks_adap_mock.get.side_effect = ks_exc.EndpointNotFound()\n self.assertRaises(ks_exc.ClientException,\n self.client.get_provider_traits, self.context, uuid)\n expected_url = '/resource_providers/' + uuid + '/traits'\n self.ks_adap_mock.get.assert_called_once_with(\n expected_url,\n global_request_id=self.context.global_id,\n **self.trait_api_kwargs)", "def test_use_certificate_uninitialized(self, ctx_or_conn):\n with pytest.raises(Error):\n ctx_or_conn.use_certificate(X509())", "def test_no_api_key(self):\n\n self.assertRaises(Exception, kaput.init, None, '123')", "async def test_no_services(hass: HomeAssistant) -> None:\n result1 = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result1[\"type\"] == RESULT_TYPE_FORM\n assert result1[\"errors\"] is None\n\n with patch(\"aussiebb.asyncio.AussieBB.__init__\", return_value=None), patch(\n \"aussiebb.asyncio.AussieBB.login\", return_value=True\n ), patch(\"aussiebb.asyncio.AussieBB.get_services\", return_value=[]), patch(\n \"homeassistant.components.aussie_broadband.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result2 = await hass.config_entries.flow.async_configure(\n result1[\"flow_id\"],\n FAKE_DATA,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == RESULT_TYPE_ABORT\n assert result2[\"reason\"] == \"no_services_found\"\n assert len(mock_setup_entry.mock_calls) == 0", "def test_validate_metadata_no_samples(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-no-samples-batch\",\n \"samples\": [],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNotNone(reason)\n\n # should call to slack webhook once\n verify(libslack.http.client.HTTPSConnection, times=1).request(...)", "def test_healer_not_confirmed(self):\n\t\tself.signup('healer')\n\n\t\tresponse = self.rest_client.get(reverse('provider_setup_intro'))\n\t\tself.assertEqual(response.status_code, 200)\n\n\t\t# check if any other page except setup is available\n\t\tresponse = self.rest_client.get(reverse('notes'))\n\t\tself.assertEqual(response.status_code, 302)", "def test_default_missing_honor(self):\r\n self.url_params['honor_code'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'To enroll, you must follow the honor code.',\r\n )", "def test_config_class():\n assert config is not None", "def test_required_parameters(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n # Missing param : lis_person_sourcedid or ext_user_username or user_id\n with self.assertRaises(PermissionDenied):\n self._authenticate(\n {\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n 
\"lis_person_contact_email_primary\": \"[email protected]\",\n },\n passport,\n )", "def test_create_hyperflex_ucsm_config_policy(self):\n pass", "def test_check_keys_exist_for_provider_string(self):\n\n secret_key = None\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def test_auth0_config_anon(anontestapp, registry):\n _test_auth_config(anontestapp, registry)", "def setUp(self):\n self.tool = flow_common_tool()\n self.xml = xml_tool()\n self.ins = system_license()\n\n self.response = {}\n self.response[\"MULTI_LICENSE\"] = \"\"\"\n <license-usage-summary xmlns=\"http://xml.juniper.net/junos/15.1I0/junos-license\">\n <features-used/>\n <feature-summary>\n <name>idp-sig</name>\n <description>IDP Signature</description>\n <licensed>1</licensed>\n <used-licensed>0</used-licensed>\n <needed>0</needed>\n <end-date junos:seconds=\"1545696000\">2018-12-25</end-date>\n </feature-summary>\n <feature-summary>\n <name>appid-sig</name>\n <description>APPID Signature</description>\n <licensed>1</licensed>\n <used-licensed>0</used-licensed>\n <needed>0</needed>\n <end-date junos:seconds=\"1545696000\">2018-12-25</end-date>\n </feature-summary>\n <feature-summary>\n <name>logical-system</name>\n <description>Logical System Capacity</description>\n <licensed>1</licensed>\n <used-licensed>1</used-licensed>\n <used-given>0</used-given>\n <needed>0</needed>\n <validity-type>permanent</validity-type>\n </feature-summary>\n <feature-summary>\n <name>remote-access-ipsec-vpn-client</name>\n <description>remote-access-ipsec-vpn-client</description>\n <licensed>2</licensed>\n <used-licensed>0</used-licensed>\n <needed>0</needed>\n <validity-type>permanent</validity-type>\n </feature-summary>\n <feature-summary>\n <name>Virtual Appliance</name>\n <description>Virtual Appliance</description>\n <licensed>1</licensed>\n <used-licensed>1</used-licensed>\n <needed>0</needed>\n <remaining-time>\n <remaining-validity-value junos:seconds=\"2572880\">29 days</remaining-validity-value>\n </remaining-time>\n </feature-summary>\n </license-usage-summary>\n \"\"\"\n\n self.response[\"SINGLE_PERMANENT_LICENSE\"] = \"\"\"\n <license-usage-summary xmlns=\"http://xml.juniper.net/junos/15.1I0/junos-license\">\n <feature-summary>\n <name>logical-system</name>\n <description>Logical System Capacity</description>\n <licensed>1</licensed>\n <used-licensed>1</used-licensed>\n <used-given>0</used-given>\n <needed>0</needed>\n <validity-type>permanent</validity-type>\n </feature-summary>\n </license-usage-summary>\n \"\"\"\n\n self.response[\"SINGLE_END_DATE_LICENSE\"] = \"\"\"\n <license-usage-summary xmlns=\"http://xml.juniper.net/junos/15.1I0/junos-license\">\n <feature-summary>\n <name>idp-sig</name>\n <description>IDP Signature</description>\n <licensed>1</licensed>\n <used-licensed>0</used-licensed>\n <needed>0</needed>\n <end-date junos:seconds=\"1545696000\">2018-12-25</end-date>\n </feature-summary>\n </license-usage-summary>\n \"\"\"\n\n self.response[\"SINGLE_REMAINING_TIME_LICENSE\"] = \"\"\"\n <license-usage-summary xmlns=\"http://xml.juniper.net/junos/15.1I0/junos-license\">\n <feature-summary>\n <name>Virtual Appliance</name>\n <description>Virtual Appliance</description>\n <licensed>1</licensed>\n <used-licensed>1</used-licensed>\n <needed>0</needed>\n <remaining-time>\n <remaining-validity-value junos:seconds=\"2572880\">29 
days</remaining-validity-value>\n </remaining-time>\n </feature-summary>\n </license-usage-summary>\n \"\"\"", "def test_set_handlers_twice(self, logger_from_provider):\n with pytest.raises(LogmeError):\n logger_from_provider._set_handlers_from_conf()", "def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }", "def test_get_config_th(self):\n self.assertTrue(settings.TH_TRELLO)\n self.assertIn('consumer_key', settings.TH_TRELLO)\n self.assertIn('consumer_secret', settings.TH_TRELLO)", "async def test_discovery_broken(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n data1 = '{ \"name\": \"Beer\" }'\n data2 = '{ \"name\": \"Milk\", \"state_topic\": \"test-topic\", \"command_topic\": \"test-topic\", \"options\": [\"milk\", \"beer\"]}'\n\n await help_test_discovery_broken(\n hass, mqtt_mock_entry, caplog, select.DOMAIN, data1, data2\n )", "def test_cloud_api():\n mock = provider.MockProvider()\n\n mock.setup_cloud('empty config....')\n\n assert mock.get_ext_ip_addr('some-node')", "def test_invalid_adapter_opts(self):\n self.oslo_config_dict['heat'] = {\n 'interface': 'public',\n 'valid_interfaces': 'private',\n }\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): interface and \"\n \"valid_interfaces are mutually exclusive.\",\n )", "def test_init_param(self):\n # Set a new version\n version = \"v3\"\n api_url = self.get_api_url(api_version=version)\n\n # Setup the mocked response\n responses.add(responses.GET, api_url, json=self.valid_response,\n status=200, match_querystring=False)\n\n acme = ACMEAccount(client=self.client, api_version=version)\n data = acme.all(self.org_id)\n\n # Verify all the query information\n # There should only be one call the first time \"all\" is called.\n # Due to pagination, this is only guaranteed as long as the number of\n # entries returned is less than the page size\n self.assertEqual(len(responses.calls), 1)\n self.match_url_with_qs(responses.calls[0].request.url, api_url=api_url)\n self.assertEqual(data, self.valid_response)", "def test_update_hyperflex_ucsm_config_policy(self):\n pass", "def _setup_ses(self):\n print(\"\\n ** Setting up SES mocking\")\n ses = boto3.client('ses', region_name=\"us-east-1\")\n ses.verify_domain_identity(Domain='donatemates.com')\n #response = ses.verify_email_address(EmailAddress='[email protected]')", "def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": 
\"12345678abcdef6\"},\n }\n\n self.endpoint_url = f\"https://westus.{mf.FACE_API_URL}\"", "def test_failed_discovery(self, req):\n req.side_effect = ks_exc.DiscoveryFailure()\n self.client._get_resource_provider(self.context, \"fake\")\n\n # reset the call count to demonstrate that future calls still\n # work\n req.reset_mock()\n self.client._get_resource_provider(self.context, \"fake\")\n self.assertTrue(req.called)", "def configure(self):\n\n '''The method makes a test to get the site info'''\n domain = 'http://localhost:8888'\n webservice_url = '/webservice/rest/server.php?'\n parameters = {\n 'wstoken': self.token,\n 'wsfunction': 'core_webservice_get_site_info',\n 'moodlewsrestformat': 'json'\n }\n request = requests.get(url=domain + webservice_url, params=parameters)\n request = request.json()\n\n if 'exception' in request:\n if request['exception'] == \"moodle_exception\":\n if request['errorcode'] == 'invalidtoken':\n return self._reopen_form()", "def test_no_credentials(self):\n twine = Twine(source=VALID_SCHEMA_TWINE)\n twine.validate_credentials()", "def setUp(self):\n global access_token\n global accountID\n global account_cur\n global api\n # self.maxDiff = None\n try:\n accountID, account_cur, access_token = unittestsetup.auth()\n setattr(sys.modules[\"oandapyV20.oandapyV20\"],\n \"TRADING_ENVIRONMENTS\",\n {\"practice\": {\n \"stream\": \"https://test.com\",\n \"api\": \"https://test.com\",\n }})\n api = API(environment=environment,\n access_token=access_token,\n headers={\"Content-Type\": \"application/json\"})\n api.api_url = 'https://test.com'\n except Exception as e:\n print(\"%s\" % e)\n exit(0)", "def setUpOnlySome(test):\n from quotationtool.quotation.source import plainTextQuotationFactory, restQuotationFactory \n from quotationtool.quotation.source import QuotationSourceTypesVocabulary\n from quotationtool.quotation.interfaces import IQuotationSourceFactory\n from zope.schema.interfaces import IVocabularyFactory\n import zope.component\n zope.component.provideUtility(\n plainTextQuotationFactory, IQuotationSourceFactory, 'plaintext')\n zope.component.provideUtility(\n restQuotationFactory, IQuotationSourceFactory, 'rest')\n zope.component.provideUtility(\n QuotationSourceTypesVocabulary, \n IVocabularyFactory, \n 'quotationtool.quotation.SourceTypes')\n import zope\n import quotationtool\n XMLConfig('meta.zcml', zope.component)()\n XMLConfig('meta.zcml', zope.security)()\n XMLConfig('configure.zcml', zope.security)()\n XMLConfig('configure.zcml', zope.app.schema)()\n XMLConfig('configure.zcml', zope.component)()\n XMLConfig('configure.zcml', zope.security)()\n XMLConfig('configure.zcml', zope.site)()\n XMLConfig('configure.zcml', zope.annotation)()\n XMLConfig('configure.zcml', zope.dublincore)()\n XMLConfig('configure.zcml', quotationtool.site)()\n # subscribers\n from quotationtool.site.interfaces import INewQuotationtoolSiteEvent\n import quotationtool.relation\n zope.component.provideHandler(\n quotationtool.relation.createRelationCatalog,\n adapts=[INewQuotationtoolSiteEvent])\n zope.component.provideHandler(\n quotationtool.quotation.quotation.createRelationIndex,\n adapts=[INewQuotationtoolSiteEvent])\n # container object annotation\n from zope.annotation.interfaces import IAttributeAnnotatable", "def test_connect_fail(self, req):\n req.side_effect = ks_exc.ConnectFailure()\n self.client._get_resource_provider(self.context, \"fake\")\n\n # reset the call count to demonstrate that future calls do\n # work\n req.reset_mock()\n 
self.client._get_resource_provider(self.context, \"fake\")\n self.assertTrue(req.called)", "def test_missing_api_key(self):\n with self.assertRaises(TypeError):\n ConnectorWebexTeams()", "async def test_options_flow_auth_failure(hass):\n\n entry = await setup_platform(hass)\n\n with patch(\n \"aussiebb.asyncio.AussieBB.get_services\", side_effect=AuthenticationException()\n ):\n\n result1 = await hass.config_entries.options.async_init(entry.entry_id)\n assert result1[\"type\"] == RESULT_TYPE_ABORT\n assert result1[\"reason\"] == \"invalid_auth\"", "async def test_zero_conf_failure(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.lidarr.config_flow.LidarrClient.async_try_zeroconf\",\n side_effect=exceptions.ArrZeroConfException,\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={CONF_SOURCE: SOURCE_USER},\n data=MOCK_USER_INPUT,\n )\n\n assert result[\"type\"] == data_entry_flow.RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"][\"base\"] == \"zeroconf_failed\"", "def test002_league_commish_settings_no_membership(self):\n\t\tresponse = self.client.get(self.test_url, follow=True)\n\t\tself.assertFalse(is_on_page(response, 'League: league_name1'))\n\t\tself.assertTrue(is_on_page(response, 'Fantasy Web - Home'))", "def test_zendesk_not_configured(self, zendesk_mock_class, datadog_mock):\r\n def test_case(missing_config):\r\n with mock.patch(missing_config, None):\r\n with self.assertRaises(Exception):\r\n self._build_and_run_request(self._anon_user, self._anon_fields)\r\n\r\n test_case(\"django.conf.settings.ZENDESK_URL\")\r\n test_case(\"django.conf.settings.ZENDESK_USER\")\r\n test_case(\"django.conf.settings.ZENDESK_API_KEY\")", "async def test_setup_failed_auth(hass: HomeAssistant, ufp: MockUFPFixture) -> None:\n\n ufp.api.get_nvr = AsyncMock(side_effect=NotAuthorized)\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n assert ufp.entry.state == ConfigEntryState.SETUP_ERROR\n assert not ufp.api.update.called", "def provider_initialize(cls, provider_name, config):\n try:\n provider_obj = config.get_object(provider_name, kind='Provider')\n print \"Provider object {0}\".format(provider_obj)\n except DatastoreException as e:\n raise MOLNSException(\"provider not found\")\n #\n print \"Checking all config artifacts.\"\n # check for ssh key\n if provider_obj['key_name'] is None or provider_obj['key_name'] == '':\n print \"Error: no key_name specified.\"\n return\n elif not provider_obj.check_ssh_key():\n print \"Creating key '{0}'\".format(provider_obj['key_name'])\n provider_obj.create_ssh_key()\n else:\n print \"SSH key={0} is valid.\".format(provider_obj['key_name'])\n\n # check for security group\n if provider_obj['group_name'] is None or provider_obj['group_name'] == '':\n print \"Error: no security group specified.\"\n return\n elif not provider_obj.check_security_group():\n print \"Creating security group '{0}'\".format(provider_obj['group_name'])\n provider_obj.create_seurity_group()\n else:\n print \"security group={0} is valid.\".format(provider_obj['group_name'])\n\n # check for MOLNS image\n if provider_obj['molns_image_name'] is None or provider_obj['molns_image_name'] == '':\n if provider_obj['ubuntu_image_name'] is None or provider_obj['ubuntu_image_name'] == '':\n print \"Error: no ubuntu_image_name given, can not create molns image.\"\n else:\n print \"Creating new image, this process can take a long time (10-30 minutes).\"\n provider_obj['molns_image_name'] = 
provider_obj.create_molns_image()\n elif not provider_obj.check_molns_image():\n print \"Error: a molns image ID was provided, but it does not exist.\"\n return\n\n print \"Success.\"\n config.save_object(provider_obj, kind='Provider')", "def test_client_verification_create(self):\n pass", "async def test_config_entry_not_ready(\n hass: HomeAssistant,\n mock_config_entry: MockConfigEntry,\n mock_jellyfin: MagicMock,\n mock_client: MagicMock,\n) -> None:\n mock_client.auth.connect_to_address.return_value = await async_load_json_fixture(\n hass,\n \"auth-connect-address-failure.json\",\n )\n\n mock_config_entry.add_to_hass(hass)\n await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config_entry.state is ConfigEntryState.SETUP_RETRY", "def setUp(self):\n self._s1ap_wrapper = s1ap_wrapper.TestWrapper()", "def setUp(self):\n self._s1ap_wrapper = s1ap_wrapper.TestWrapper()", "def test_setup_with_invalid_config(self):\n setup_component(self.hass, \"sensor\", INVALID_CONFIG_MINIMAL)\n self.hass.block_till_done()\n\n state = self.hass.states.get(\"sensor.dark_sky_summary\")\n assert state is None", "def test_client_key_secret_not_provided(self):\r\n\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['test_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n #set another lti_id\r\n self.xmodule.lti_id = \"another_lti_id\"\r\n key_secret = self.xmodule.get_client_key_secret()\r\n expected = ('','')\r\n self.assertEqual(expected, key_secret)", "def test_setup(self):\n assert self.http_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def _check_settings(self):\n if self.api_key is None:\n raise ImproperlyConfigured(\"You must provide an API key.\")" ]
[ "0.697038", "0.6617586", "0.6555091", "0.5951466", "0.5813457", "0.5800178", "0.5706513", "0.57041866", "0.5636675", "0.56051576", "0.56023586", "0.55914366", "0.5573786", "0.5566785", "0.55414", "0.55256534", "0.5518977", "0.5488716", "0.5402298", "0.5390696", "0.5382451", "0.53727484", "0.5347356", "0.534379", "0.5339954", "0.53159213", "0.52803534", "0.5274853", "0.52648544", "0.52554554", "0.5255066", "0.52542454", "0.5236865", "0.5229157", "0.52267545", "0.5196407", "0.5193604", "0.51897466", "0.51886034", "0.51874363", "0.5175516", "0.51583046", "0.5143858", "0.5124166", "0.5108923", "0.51025856", "0.51025665", "0.5091425", "0.50852895", "0.5075704", "0.50702184", "0.5069184", "0.50673676", "0.5062827", "0.5058995", "0.505701", "0.50564253", "0.5054414", "0.50539887", "0.5049874", "0.50473654", "0.5039278", "0.5038847", "0.50386214", "0.5034593", "0.50335336", "0.5028934", "0.5026448", "0.5017652", "0.5012186", "0.5008398", "0.50056016", "0.50038755", "0.50000405", "0.49998346", "0.49954572", "0.49864757", "0.49848947", "0.49740213", "0.49725953", "0.49672884", "0.49659127", "0.49616137", "0.49611032", "0.49586868", "0.495717", "0.49562418", "0.49547526", "0.49518523", "0.4947306", "0.49432385", "0.49350184", "0.49349937", "0.49317697", "0.4930511", "0.4930511", "0.49279925", "0.49270517", "0.4925708", "0.49249497" ]
0.6533367
3
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mapping overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone).
def test_register_sapsf_metadata_present(self): expected_country = 'AU' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', } self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps(provider_settings) ) self._test_register(country=expected_country)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_metadata_present_empty_value_override(self):\n\n value_map = {'country': {}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_with_value_default(self):\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,country,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'country': 'Australia'\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)\n\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings),\n default_email='[email protected]'\n )\n self.USER_EMAIL = '[email protected]'\n self._test_register()", "def 
test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"value\": \"There is a product bought\"}',\n response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/?format=json\", data={\"PLN\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": \"100\"}', response.content)\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn('{\"PLN\": \"100\"}', response.content)", "def test_mocked_api_set_new_value(self):\n c = Client()\n response = c.get(self.patch_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 157}',\n response.content)\n response = c.patch(\n self.patch_url, data={\"PLN\": 20, \"EURO\": 20})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)\n response = c.get(self.patch_url)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)", "def test_entities__FieldCustomization__set_value__2(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')", "def set_provider_defaults(provider: str, config: Dict[str, Any]) -> None:\n\n 
class SetProviderDefaults:\n @st.hookimpl\n def get_provider_config(self, name, params, registry):\n if name != provider:\n return None\n conf = config.copy()\n conf.update(params)\n return conf\n\n st.registry.register(SetProviderDefaults())", "def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"", "def test_entities__NoFieldCustomization__set_value__1(root_folder):\n nfc = IFieldCustomization(root_folder)\n with pytest.raises(NotImplementedError):\n nfc.set_value(IAddressBook['time_zone'], u'label', u'foo')", "def setup_provider(self):\n pass", "def test_raises_set_alt_data(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)", "def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)", "def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def test_configure():\n\n configs = DTO()\n configs.update(fake_name='fake_name', fake_id='fake_id',\n fake_number=33, fake_value='fake_value')\n application_services.configure(configs)\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in\n ['FAKE_NAME', 'FAKE_ID', 'FAKE_NUMBER', 'FAKE_VALUE'])\n\n assert not any(name in app_configs for name in\n ['fake_name', 'fake_id', 'fake_number', 'fake_value'])", "def setUp(self):\n super().setUp()\n self.mock_requests(get_geocode_data=copy.deepcopy(test_constants.GET_ADDRESS_CANDIDATES_API_MOCK), \n get_data=copy.deepcopy(test_constants.GET_LIBRARY_API_MOCK))", "def mock_config(monkeypatch: Any) -> 
None:\n monkeypatch.setattr(\"catapi.config.ENABLE_FOO\", \"true\")\n monkeypatch.setattr(\"catapi.config.ENABLE_BAR\", \"false\")", "def test_force_override(self):\n DummyLoader.register()\n try:\n DummyLoader.register(override=True)\n except ValueError:\n self.fail('Can not register if passing `override` set to `True`.')", "def test_provided_data_takes_precedence_over_environ(self, mock_provider, monkeypatch):\n p = mock_provider()\n prefix = f'mock_'\n monkeypatch.setenv(f'{prefix}{p.provider_name}_required'.upper(), 'foo')\n rsp = p.notify(required='bar', env_prefix=prefix)\n assert rsp.status == 'success'\n assert rsp.data['required'] == 'bar'", "def test_resolution_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ResolutionAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['resolution'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'resolution',\n ','.join(self.new['resolution']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['resolution'])", "def test_update_reg_ex_config(self):\n pass", "def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)", "def test_value_base_metadata_param(self):\n value = { 'color': 'blue' }\n base_meta = BaseMetadata(api_client=self.IDS_SYS_CLIENT, value=value)\n self.assertEqual(base_meta.value, value)", "def test_set_dict_value_2(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"B\")", "def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"", "def test_country_overrides(self):\n # Retrieve the registration form description\n with override_settings(REGISTRATION_EXTRA_FIELDS={\"country\": \"required\"}):\n response = self.client.get(self.url)\n self.assertHttpOK(response)\n\n self.assertContains(response, 'Kosovo')", "def testMapSetdefault(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap()\n with self.assertRaises(AssertionError):\n m.setdefault(1, data_types.BuildStats())\n with self.assertRaises(AssertionError):\n m.setdefault('1', 2)\n m.setdefault('1', data_types.BuildStats())\n self.assertEqual(m, {'1': data_types.BuildStats()})", "def __init__(self, allow_replace=False):\n self.providers = {}\n self.allow_replace = allow_replace", "def defaults_provider():\n return getattr(defaults_provider, 'overrides', {})", 
"def test_update_when_value_is_none(self, mock_req):\n self.setup_api(None, mock_req)\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n self.fake_delay(2)\n sensor.update()\n assert sensor.state is None", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def test_patch_user_identity_mapping(self):\n pass", "def test_good_custom_params(self):\r\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n self.xmodule.get_input_fields()\r\n self.xmodule.oauth_params.assert_called_with(\r\n {u'custom_test_custom_params': u'test_custom_param_value'},\r\n 'test_client_key', 'test_client_secret'\r\n )", "def test_good_custom_params(self):\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\n self.xmodule.oauth_params = Mock()\n self.xmodule.get_input_fields()\n self.xmodule.oauth_params.assert_called_with(\n {'custom_test_custom_params': 'test_custom_param_value'},\n 'test_client_key', 'test_client_secret'\n )", "def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def test_entities__FieldCustomization__set_value__3(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {'0': 'R27DLI_4812',\r\n '1': 'U1PLI_7889',\r\n '2': 'W3Cecum_4858',\r\n '3': 'R27DLI_3243',\r\n }\r\n app = GenericRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)\r\n self.assertEqual(obs, exp)", "async def test_no_value_template(hass: HomeAssistant, calls) -> None:\n await _register_fan_sources(hass)\n\n with assert_setup_component(1, \"fan\"):\n test_fan_config = {\n \"preset_mode_template\": \"{{ states('input_select.preset_mode') }}\",\n \"percentage_template\": \"{{ states('input_number.percentage') }}\",\n \"oscillating_template\": \"{{ states('input_select.osc') }}\",\n \"direction_template\": \"{{ states('input_select.direction') }}\",\n \"turn_on\": [\n {\n \"service\": \"input_boolean.turn_on\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_on\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"turn_off\": [\n {\n \"service\": \"input_boolean.turn_off\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_off\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"set_preset_mode\": [\n {\n \"service\": \"input_select.select_option\",\n \"data_template\": {\n \"entity_id\": _PRESET_MODE_INPUT_SELECT,\n \"option\": \"{{ 
preset_mode }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_preset_mode\",\n \"caller\": \"{{ this.entity_id }}\",\n \"option\": \"{{ preset_mode }}\",\n },\n },\n ],\n \"set_percentage\": [\n {\n \"service\": \"input_number.set_value\",\n \"data_template\": {\n \"entity_id\": _PERCENTAGE_INPUT_NUMBER,\n \"value\": \"{{ percentage }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_value\",\n \"caller\": \"{{ this.entity_id }}\",\n \"value\": \"{{ percentage }}\",\n },\n },\n ],\n }\n assert await setup.async_setup_component(\n hass,\n \"fan\",\n {\"fan\": {\"platform\": \"template\", \"fans\": {\"test_fan\": test_fan_config}}},\n )\n\n await hass.async_block_till_done()\n await hass.async_start()\n await hass.async_block_till_done()\n\n await common.async_turn_on(hass, _TEST_FAN)\n _verify(hass, STATE_ON, 0, None, None, None)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, 0, None, None, None)\n\n percent = 100\n await common.async_set_percentage(hass, _TEST_FAN, percent)\n assert int(float(hass.states.get(_PERCENTAGE_INPUT_NUMBER).state)) == percent\n _verify(hass, STATE_ON, percent, None, None, None)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, percent, None, None, None)\n\n preset = \"auto\"\n await common.async_set_preset_mode(hass, _TEST_FAN, preset)\n assert hass.states.get(_PRESET_MODE_INPUT_SELECT).state == preset\n _verify(hass, STATE_ON, percent, None, None, preset)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, percent, None, None, preset)\n\n await common.async_set_direction(hass, _TEST_FAN, True)\n _verify(hass, STATE_OFF, percent, None, None, preset)\n\n await common.async_oscillate(hass, _TEST_FAN, True)\n _verify(hass, STATE_OFF, percent, None, None, preset)", "def test_all_providers(self, mock_provider, monkeypatch):\n\n def mock_providers():\n return ['mock']\n\n monkeypatch.setattr(notifiers, 'all_providers', mock_providers)\n\n assert 'mock' in notifiers.all_providers()", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'random_max_len,value_profile,'\n strategy1.probability = 1\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_register_http_failure(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def test_patch_hyperflex_proxy_setting_policy(self):\n pass", "def 
setUp(self):\n super().setUp()\n\n # Mock the call to the SAP SuccessFactors assertion endpoint\n SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp'\n\n def assertion_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')\n\n httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)\n\n SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp'\n\n def bad_callback(_request, _uri, headers):\n \"\"\"\n Return a 404 error when someone tries to call the URL.\n \"\"\"\n return (404, headers, 'NOT AN ASSERTION')\n\n httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback)\n\n # Mock the call to the SAP SuccessFactors token endpoint\n SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'\n\n def token_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'assertion=fake_saml_assertion' in _request.body\n assert b'company_id=NCC1701D' in _request.body\n assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, '{\"access_token\": \"faketoken\"}')\n\n httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)\n\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'email': '[email protected]',\n 'country': 'Australia',\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)", "def testServiceMapping_ByFactory(self):\n self.DoMappingTest({'/my-service': MyService.new_factory('new-value')})", "def testTestExpectationMap(self):\n self._StringToMapHelper(data_types.TestExpectationMap,\n data_types.ExpectationBuilderMap)", "def test_entities__FieldCustomization__set_value__1(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n assert u'Default time zone value 123' == fc.get_value(field, 'label')\n assert u'Default time zone value 123' == fc.query_value(field, 'label')", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": \"555\"}', response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/product_buy/?format=json\", data={\"price\": 100})\n 
self.assertEqual(response.status_code, 200)\n self.assertIn(\"product was bought\", response.content)\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn({\"account\": \"455\"}, response.content)", "def test_startup_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # make sure some value isnt the default value\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n self.driver._protocol._param_dict.update(\"baz=30\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 30)\n\n # pretend to manually adjust a few things:\n self.driver._protocol._param_dict.update(\"foo=1000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 1000)\n self.driver._protocol._param_dict.update(\"bar=1500\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 1500)\n self.driver._protocol._param_dict.update(\"baz=2000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n\n # pretend to go through the motions of a startup sequence\n self.driver.set_init_params({'foo': 100, \"bar\": 150, \"baz\": 200})\n\n # Now a virtual method in the protocol that asserts when not implemented\n # behavior proven in derived protocol classes\n # self.driver.apply_startup_params()\n\n # check the values on the other end\n # running_config = self.driver._protocol.get_cached_config()\n\n # confirm that the default values were set back appropriately.\n # self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 100)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 150)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bat\"), 40)\n\n ##### Integration tests for startup config in the SBE37 integration suite", "def test_get_setting(monkeypatch):\n random_key = uuid()\n default_value, actual_value = \"foo\", \"bar\"\n assert helpers.get_setting(random_key, default_value) == default_value\n monkeypatch.setenv(random_key, actual_value)\n assert helpers.get_setting(random_key, default_value) == actual_value", "def test_handle_value_error(self, runway_context: MockRunwayContext) -> None:\n runway_context.add_stubber(\"ecr\")\n with pytest.raises(ValueError) as excinfo:\n EcrLookup.handle(\"unsupported\", runway_context)\n assert str(excinfo.value) == \"ecr lookup does not support 'unsupported'\"\n with pytest.raises(ValueError):\n EcrLookup.handle(\"unsupported::default=something\", runway_context)", "def test_setup(self):\n self.assertIsNotNone(getattr(self, 'original_good_practice_attrs', None))\n self.assertIsNotNone(getattr(self, 'original_get_overrides', None))\n self.assertIsNotNone(getattr(self, 'original_get_alt_field_info', None))\n self.assertIsNone(getattr(self.form, 'is_prepared', None))\n self.assertNotIn('good_practice_attrs', self.form.has_call)\n self.assertNotIn('get_overrides', self.form.has_call)\n self.assertNotIn('get_alt_field_info', self.form.has_call)\n good_practice = self.form.good_practice_attrs()\n if self.good_practice == 'empty':\n self.assertEqual({}, good_practice)\n overrides = self.form.get_overrides()\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual({}, overrides)\n elif self.overrides_empty_or_skip == 'skip':\n self.assertEqual(self.no_resize_override(), overrides)\n if self.alt_info == 'empty':\n 
self.assertEqual({}, self.form.get_alt_field_info())\n self.assertIn('get_alt_field_info', self.form.has_call)\n self.assertEqual(self.form.get_alt_field_info.__name__, 'empty_get_alt_field_info')\n self.assertIn('good_practice_attrs', self.form.has_call)\n self.assertIn('get_overrides', self.form.has_call)\n self.form.has_call = []\n self.assertEqual(self.form.good_practice_attrs.__name__, 'empty_good_practice_attrs')\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual(self.form.get_overrides.__name__, 'empty_get_overrides')\n self.assertEqual(self.form.get_overrides.__name__, 'skip_get_overrides')\n request_type = 'POST' if self.get_initial_data() else 'GET'\n self.assertEqual(request_type, self.request.method)", "def test_call_alt_params(self):\r\n otu_picker = BlastOtuPicker({'max_e_value': 1e-30})\r\n expected = {}\r\n actual = otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected)\r\n\r\n self.otu_picker = BlastOtuPicker(\r\n {'max_e_value': 1e-3, 'Similarity': 0.90})\r\n expected_90 = {'ref1': ['s3', 's2', 's1'],\r\n 'ref2': ['s4'],\r\n 'ref3': ['s5'],\r\n 'ref4': ['s6']}\r\n actual = self.otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected_90)", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def test_function_values(self):\n\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_foo\",\n self.pick_byte2,\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=1,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_bar\",\n lambda x : bool(x&2), # bit map example\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=False,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n \n # check defaults just to be safe\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 1)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n result = self.param_dict.update(1005) # just change first in list\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 3)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # fn_bar does not get updated here\n result = self.param_dict.update_many(1205)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(len(result), 1)\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 4)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # both are updated now\n result = self.param_dict.update_many(6)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(result['fn_bar'], True)\n self.assertEqual(len(result), 2)\n \n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 0)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, True)", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def 
mocked_config_file_custom_provider():\n data = \"\"\"[YESSSSMS]\nLOGIN = 06501234567\nPASSWD = MySecre3tPassw0rd\nDEFAULT_TO = +43664123123123\n# MVNO = FANTASYMOBILE\n[YESSSSMS_PROVIDER_URLS]\nLOGIN_URL = mock://kontomanager.at/index.php\nLOGOUT_URL = mock://kontomanager.at/index.php?dologout=2\nKONTOMANAGER_URL = mock://kontomanager.at/kundendaten.php\nWEBSMS_FORM_URL = mock://kontomanager.at/websms.php\nSEND_SMS_URL = mock://kontomanager.at/websms_send.php\n\"\"\"\n with mock.patch(\n \"configparser.open\",\n # \"builtins.open\",\n mock.mock_open(read_data=data),\n ):\n yield", "def test_apply_startup_params(self):\n self.assertRaises(NotImplementedException,\n self.driver.apply_startup_params)", "def test_map_view_context_fields_values(\n self,\n mock_installation_statistics_model_overall_counts,\n mock_installation_statistics_model_data_per_period,\n mock_installation_statistics_model_timeline,\n mock_get_data_created_datetime_scope\n ): # pylint: disable=too-many-locals\n mock_timeline = ['2017-05-14', '2017-05-15', '2017-05-16']\n mock_students, mock_courses, mock_instances = [4124, 5122, 6412], [110, 211, 167], [30, 20, 25]\n mock_instances_count, mock_courses_count, mock_students_count, mock_certificates_count = 6412, 167, 25, 0\n mock_first_datetime_of_update_data = datetime(2017, 6, 1, 14, 56, 18)\n mock_last_datetime_of_update_data = datetime(2017, 7, 2, 23, 12, 8)\n\n mock_installation_statistics_model_timeline.return_value = mock_timeline\n\n mock_installation_statistics_model_data_per_period.return_value = mock_students, mock_courses, mock_instances\n\n mock_installation_statistics_model_overall_counts.return_value = {\n \"instances_count\": 6412,\n \"courses_count\": 167,\n \"students_count\": 25,\n \"generated_certificates_count\": 0,\n }\n\n mock_get_data_created_datetime_scope.return_value = \\\n mock_first_datetime_of_update_data, mock_last_datetime_of_update_data\n\n response = self.client.get('/')\n\n self.assertEqual(json.loads(response.context['timeline']), mock_timeline)\n self.assertEqual(json.loads(response.context['students']), mock_students)\n self.assertEqual(json.loads(response.context['courses']), mock_courses)\n self.assertEqual(json.loads(response.context['instances']), mock_instances)\n self.assertEqual(response.context['instances_count'], mock_instances_count)\n self.assertEqual(response.context['students_count'], mock_students_count)\n self.assertEqual(response.context['courses_count'], mock_courses_count)\n self.assertEqual(response.context['generated_certificates_count'], mock_certificates_count)\n self.assertEqual(response.context['first_datetime_of_update_data'], mock_first_datetime_of_update_data)\n self.assertEqual(response.context['last_datetime_of_update_data'], mock_last_datetime_of_update_data)", "def test_settings_proxy_properties_setting(parameters: Dict[str, Any]) -> None:\n settings = Settings()\n settings_proxy = settings.create_proxy()\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))\n\n for key, value in parameters.items():\n settings.__setattr__(key, value)\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))", "def 
test_config_device_init_with_defaults(get_config, monkeypatch):\n notbase_config = {'not_presented': 1}\n monkeypatch.setattr(DeviceConfig, 'minimal_essential_conf', notbase_config)\n cfg = get_config(DeviceConfig, base_config)\n\n assert isinstance(cfg, DeviceConfig), 'wrong class'\n assert cfg.data == notbase_config, 'bad config loaded'", "def setUp(self):\n super().setUp()\n\n self.provider = Provider.objects.filter(type=Provider.PROVIDER_OCP).first()\n\n ocp_metric = metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR\n ocp_source_type = Provider.PROVIDER_OCP\n tiered_rates = [{\"unit\": \"USD\", \"value\": 0.22}]\n self.ocp_data = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": ocp_source_type,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}, \"tiered_rates\": tiered_rates}],\n \"currency\": \"USD\",\n }\n self.basic_model = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}}],\n \"currency\": \"USD\",\n }", "async def test_basic_setup(\n hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: ProtectCamera\n):\n\n camera_high_only = mock_camera.copy(deep=True)\n camera_high_only._api = mock_entry.api\n camera_high_only.channels[0]._api = mock_entry.api\n camera_high_only.channels[1]._api = mock_entry.api\n camera_high_only.channels[2]._api = mock_entry.api\n camera_high_only.name = \"Test Camera 1\"\n camera_high_only.id = \"test_high\"\n camera_high_only.channels[0].is_rtsp_enabled = True\n camera_high_only.channels[0].name = \"High\"\n camera_high_only.channels[0].rtsp_alias = \"test_high_alias\"\n camera_high_only.channels[1].is_rtsp_enabled = False\n camera_high_only.channels[2].is_rtsp_enabled = False\n\n camera_medium_only = mock_camera.copy(deep=True)\n camera_medium_only._api = mock_entry.api\n camera_medium_only.channels[0]._api = mock_entry.api\n camera_medium_only.channels[1]._api = mock_entry.api\n camera_medium_only.channels[2]._api = mock_entry.api\n camera_medium_only.name = \"Test Camera 2\"\n camera_medium_only.id = \"test_medium\"\n camera_medium_only.channels[0].is_rtsp_enabled = False\n camera_medium_only.channels[1].is_rtsp_enabled = True\n camera_medium_only.channels[1].name = \"Medium\"\n camera_medium_only.channels[1].rtsp_alias = \"test_medium_alias\"\n camera_medium_only.channels[2].is_rtsp_enabled = False\n\n camera_all_channels = mock_camera.copy(deep=True)\n camera_all_channels._api = mock_entry.api\n camera_all_channels.channels[0]._api = mock_entry.api\n camera_all_channels.channels[1]._api = mock_entry.api\n camera_all_channels.channels[2]._api = mock_entry.api\n camera_all_channels.name = \"Test Camera 3\"\n camera_all_channels.id = \"test_all\"\n camera_all_channels.channels[0].is_rtsp_enabled = True\n camera_all_channels.channels[0].name = \"High\"\n camera_all_channels.channels[0].rtsp_alias = \"test_high_alias\"\n camera_all_channels.channels[1].is_rtsp_enabled = True\n camera_all_channels.channels[1].name = \"Medium\"\n camera_all_channels.channels[1].rtsp_alias = \"test_medium_alias\"\n camera_all_channels.channels[2].is_rtsp_enabled = True\n camera_all_channels.channels[2].name = \"Low\"\n camera_all_channels.channels[2].rtsp_alias = 
\"test_low_alias\"\n\n camera_no_channels = mock_camera.copy(deep=True)\n camera_no_channels._api = mock_entry.api\n camera_no_channels.channels[0]._api = mock_entry.api\n camera_no_channels.channels[1]._api = mock_entry.api\n camera_no_channels.channels[2]._api = mock_entry.api\n camera_no_channels.name = \"Test Camera 4\"\n camera_no_channels.id = \"test_none\"\n camera_no_channels.channels[0].is_rtsp_enabled = False\n camera_no_channels.channels[0].name = \"High\"\n camera_no_channels.channels[1].is_rtsp_enabled = False\n camera_no_channels.channels[2].is_rtsp_enabled = False\n\n camera_package = mock_camera.copy(deep=True)\n camera_package._api = mock_entry.api\n camera_package.channels[0]._api = mock_entry.api\n camera_package.channels[1]._api = mock_entry.api\n camera_package.channels[2]._api = mock_entry.api\n camera_package.name = \"Test Camera 5\"\n camera_package.id = \"test_package\"\n camera_package.channels[0].is_rtsp_enabled = True\n camera_package.channels[0].name = \"High\"\n camera_package.channels[0].rtsp_alias = \"test_high_alias\"\n camera_package.channels[1].is_rtsp_enabled = False\n camera_package.channels[2].is_rtsp_enabled = False\n package_channel = camera_package.channels[0].copy(deep=True)\n package_channel.is_rtsp_enabled = False\n package_channel.name = \"Package Camera\"\n package_channel.id = 3\n package_channel.fps = 2\n package_channel.rtsp_alias = \"test_package_alias\"\n camera_package.channels.append(package_channel)\n\n mock_entry.api.bootstrap.cameras = {\n camera_high_only.id: camera_high_only,\n camera_medium_only.id: camera_medium_only,\n camera_all_channels.id: camera_all_channels,\n camera_no_channels.id: camera_no_channels,\n camera_package.id: camera_package,\n }\n await hass.config_entries.async_setup(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.CAMERA, 14, 6)\n\n # test camera 1\n entity_id = validate_default_camera_entity(hass, camera_high_only, 0)\n await validate_rtsps_camera_state(hass, camera_high_only, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_high_only, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_high_only, 0, entity_id)\n\n # test camera 2\n entity_id = validate_default_camera_entity(hass, camera_medium_only, 1)\n await validate_rtsps_camera_state(hass, camera_medium_only, 1, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_medium_only, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_medium_only, 1, entity_id)\n\n # test camera 3\n entity_id = validate_default_camera_entity(hass, camera_all_channels, 0)\n await validate_rtsps_camera_state(hass, camera_all_channels, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 0, entity_id)\n\n entity_id = validate_rtsps_camera_entity(hass, camera_all_channels, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsps_camera_state(hass, camera_all_channels, 1, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 1, entity_id)\n\n entity_id = validate_rtsps_camera_entity(hass, camera_all_channels, 2)\n 
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsps_camera_state(hass, camera_all_channels, 2, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 2)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 2, entity_id)\n\n # test camera 4\n entity_id = validate_default_camera_entity(hass, camera_no_channels, 0)\n await validate_no_stream_camera_state(\n hass, camera_no_channels, 0, entity_id, features=0\n )\n\n # test camera 5\n entity_id = validate_default_camera_entity(hass, camera_package, 0)\n await validate_rtsps_camera_state(hass, camera_package, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_package, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_package, 0, entity_id)\n\n entity_id = validate_default_camera_entity(hass, camera_package, 3)\n await validate_no_stream_camera_state(\n hass, camera_package, 3, entity_id, features=0\n )", "def default_load_all(cls, values): # noqa: N805\n if not any(values.values()):\n values[\"eia860\"] = Eia860Settings()\n values[\"eia861\"] = Eia861Settings()\n values[\"eia923\"] = Eia923Settings()\n\n return values", "async def test_form_multiple_services(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"errors\"] is None\n\n with patch(\"aussiebb.asyncio.AussieBB.__init__\", return_value=None), patch(\n \"aussiebb.asyncio.AussieBB.login\", return_value=True\n ), patch(\"aussiebb.asyncio.AussieBB.get_services\", return_value=FAKE_SERVICES):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n FAKE_DATA,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == RESULT_TYPE_FORM\n assert result2[\"step_id\"] == \"service\"\n assert result2[\"errors\"] is None\n\n with patch(\n \"homeassistant.components.aussie_broadband.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]]},\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result3[\"title\"] == TEST_USERNAME\n assert result3[\"data\"] == FAKE_DATA\n assert result3[\"options\"] == {\n CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]],\n }\n assert len(mock_setup_entry.mock_calls) == 1", "def requires_mapping(self):", "def test_set_dict_value_3(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"C\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"C\")", "def test_real_value(self):\n setting_model = Setting(python_type='list', dry_value='')\n self.assertEqual(setting_model.value, [])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Lancelot,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Lancelot', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Robin,Patsy',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Robin', 'Patsy'])", "def used_implementation(self, impl: 
str, value):", "def test_input_identifier_types(self):\n # It's okay to set INPUT_IDENTIFIER_TYPES to None it means you\n # will cover any and all identifier types.\n class Base(IdentifierCoverageProvider):\n SERVICE_NAME = \"Test provider\"\n DATA_SOURCE_NAME = DataSource.GUTENBERG\n\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = None\n provider = MockProvider(self._db)\n assert None == provider.input_identifier_types\n\n # It's okay to set a single value.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = Identifier.ISBN\n provider = MockProvider(self._db)\n assert [Identifier.ISBN] == provider.input_identifier_types\n\n # It's okay to set a list of values.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = [Identifier.ISBN, Identifier.OVERDRIVE_ID]\n provider = MockProvider(self._db)\n assert ([Identifier.ISBN, Identifier.OVERDRIVE_ID] ==\n provider.input_identifier_types)\n\n # It's not okay to do nothing.\n class MockProvider(Base):\n pass\n with pytest.raises(ValueError) as excinfo:\n MockProvider(self._db)\n assert \"MockProvider must define INPUT_IDENTIFIER_TYPES, even if the value is None.\" in str(excinfo.value)", "def _verify_map(self):\n # ---- verify self.info ----\n if self.info['contype'] == NotImplemented:\n # 'contype' must be defined\n errstr = \"self.info['contype'] must be defined as:\\n\" \\\n \" 'motion', 'waveform', or 'power'\"\n raise NotImplementedError(errstr)\n elif self.info['contype'] not in ['motion',\n 'waveform',\n 'power']:\n # 'contype' must be one of specified type\n errstr = \"self.info['contype'] must be defined as:\\n\" \\\n \" 'motion', 'waveform', or 'power'\"\n raise NotImplementedError(errstr)\n\n # ---- verity self.configs ----\n # 'dataset fields' must be defined\n if 'dataset fields' not in self.configs:\n errstr = \"self.configs['dataset fields'] must be defined \" \\\n \"as:\\n [('field name', dtype), ]\"\n raise NotImplementedError(errstr)\n elif self.configs['dataset fields'] == NotImplemented:\n errstr = \"self.configs['dataset fields'] must be defined \" \\\n \"as:\\n [('field name', dtype), ]\"\n raise NotImplementedError(errstr)\n\n # 'dset field to numpy field' must be defined\n # - each 'dataset field' needs a mapping to a structured numpy\n # field for hdfReadControl\n # - 'dset field to numpy field' is a list of 3-element tuples\n # where each entry in the list corresponds to a dataset field\n # name\n # - the 3-element tuple must follow the format:\n #\n # self.configs['dset field to numpy field'][i] = (\n # str, # dataset field name\n # str, # corresponding structured numpy field name\n # int) # index of structured numpy field\n #\n # For example, the '6K Compumotor would look like...\n # self.configs['dset field to numpy'] = [\n # ('x', 'xyz', 0),\n # ('y', 'xyz', 1),\n # ('z', 'xyz', 2)]\n #\n key = 'dset field to numpy field'\n if key not in self.configs:\n raise NotImplementedError\n elif self.configs[key] == NotImplemented:\n raise NotImplementedError\n elif type(self.configs[key]) is not list:\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n elif not all(isinstance(val, tuple)\n for val in self.configs[key]):\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n elif not all(len(val) == 3 for val in self.configs[key]):\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n 
else:\n err = False\n dset_fields = [name\n for name, dftype\n in self.configs['dataset fields']]\n for dfname, npfname, npi in self.configs[key]:\n if dfname not in dset_fields:\n err = True\n break\n elif type(npfname) is not str:\n err = True\n break\n elif type(npi) is not int:\n err = True\n break\n if err:\n errstr = \"self.configs['dset field to numpy field] \" \\\n + \"must be a list of 3-element tuples\"\n raise Exception(errstr)\n\n # contype == 'motion' specific verification\n if self.contype == 'motion':\n # verify 'motion list'\n if 'motion list' not in self.configs:\n # 'motion list' exists\n errstr = \"self.configs['motion list'] must be defined\"\n raise NotImplementedError(errstr)\n elif self.configs['motion list'] == NotImplemented:\n # 'motion list' is defined\n errstr = \"self.configs['motion list'] must be defined\"\n raise NotImplementedError(errstr)\n else:\n # each 'motion list' must have its own config\n for name in self.configs['motion list']:\n if name not in self.configs:\n errstr = \"must defined self.configs['motion \" \\\n \"name'] for each motion list in \" \\\n \"self.configs['motion list'] = \" \\\n \"[motion name, ]\"\n raise NotImplementedError(errstr)\n\n # verify 'probe list'\n if 'probe list' not in self.configs:\n # 'probe list' exists\n errstr = \"self.configs['probe list'] must be defined\"\n raise NotImplementedError(errstr)\n elif self.configs['probe list'] == NotImplemented:\n # 'probe list' is defined\n errstr = \"self.configs['probe list'] must be defined\"\n raise NotImplementedError(errstr)\n else:\n # each 'probe list' must have its own config\n for name in self.configs['probe list']:\n if name not in self.configs:\n errstr = \"must defined self.configs['probe \" \\\n \"name'] for each probe in \" \\\n \"self.configs['probe list'] = \" \\\n \"[probe name, ]\"\n raise NotImplementedError(errstr)\n\n # delete 'config names' if present\n if 'config names' in self.configs:\n del self.configs['config names']\n\n # verify all other contypes\n if self.contype != 'motion':\n # remove 'motion list'\n if 'motion list' in self.configs:\n # remove 'motion list' children\n for name in self.configs['motion list']:\n if name in self.configs:\n del(self.configs[name])\n\n # remove 'motion list'\n del(self.configs['motion list'])\n\n # remove 'probe list'\n if 'probe list' in self.configs:\n # remove 'probe list' children\n for name in self.configs['probe list']:\n if name in self.configs:\n del (self.configs[name])\n\n # remove 'motion list'\n del (self.configs['probe list'])\n\n # verify 'command list'\n # if 'command list' not in self.configs:\n # # 'command list' exists\n # errstr = \"self.configs['command list'] must be \" \\\n # \"defined\"\n # raise NotImplementedError(errstr)\n # elif self.configs['command list'] == NotImplemented:\n # # 'motion list' is defined\n # errstr = \"self.configs['command list'] must be \" \\\n # \"defined\"\n # raise NotImplementedError(errstr)", "def test_prep_overrides(self):\n original_data = self.form.data\n test_data = original_data.copy()\n test_data._mutable = False\n self.form.data = test_data # copied only to allow tear-down reverting to original.\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n self.form.fields = test_fields # copied to allow tear-down reverting to original.\n original_get_overrides = self.form.get_overrides\n def replace_overrides(): return self.formfield_attrs_overrides\n self.form.get_overrides = replace_overrides\n original_alt_field_info = getattr(self.form, 
'alt_field_info', None)\n self.form.alt_field_info = {}\n overrides = self.formfield_attrs_overrides.copy()\n DEFAULT = overrides.pop('_default_')\n expected_attrs = {}\n for name, field in test_fields.items():\n attrs = field.widget.attrs.copy()\n if isinstance(field.widget, (RadioSelect, CheckboxSelectMultiple, CheckboxInput, )):\n pass # update if similar section in prep_fields is updated.\n attrs.update(overrides.get(name, {}))\n # TODO: setup structure for using default or defined version for all CharFields.\n no_resize = overrides.get(name, {}).pop('no_size_override', False)\n no_resize = True if isinstance(field.widget, (HiddenInput, MultipleHiddenInput)) else no_resize\n if no_resize:\n expected_attrs[name] = attrs\n continue # None of the following size overrides are applied for this field.\n if isinstance(field.widget, Textarea):\n width_attr_name = 'cols'\n default = DEFAULT.get('cols', None)\n display_size = attrs.get('cols', None)\n if 'rows' in DEFAULT:\n height = attrs.get('rows', None)\n height = min((DEFAULT['rows'], int(height))) if height else DEFAULT['rows']\n attrs['rows'] = str(height)\n if default: # For textarea, we always override. The others depend on different conditions.\n display_size = display_size or default\n display_size = min((int(display_size), int(default)))\n elif issubclass(field.__class__, CharField):\n width_attr_name = 'size' # 'size' is only valid for input types: email, password, tel, text\n default = DEFAULT.get('size', None) # Cannot use float(\"inf\") as an int.\n display_size = attrs.get('size', None)\n else: # This field does not have a size setting.\n width_attr_name, default, display_size = None, None, None\n input_size = attrs.get('maxlength', None)\n possible_size = [int(ea) for ea in (display_size or default, input_size) if ea]\n # attrs['size'] = str(int(min(float(display_size), float(input_size)))) # Can't use float(\"inf\") as an int.\n if possible_size and width_attr_name:\n attrs[width_attr_name] = str(min(possible_size))\n expected_attrs[name] = attrs\n # Expected:\n # formfield_attrs_overrides = {\n # '_default_': {'size': 15, 'cols': 20, 'rows': 4, },\n # 'first': {'maxlength': 191, 'size': 20, },\n # 'second': {'maxlength': 2, }, # 'size': 2,\n # 'last': {'maxlength': 2, 'size': 5, },\n # }\n result_fields = self.form.prep_fields()\n result_attrs = {name: field.widget.attrs.copy() for name, field in result_fields.items()}\n first_maxlength = expected_attrs['first']['maxlength'] # overrides['first']['maxlength']\n first_size = expected_attrs['first']['size'] # overrides['first']['size']\n second_maxlength = expected_attrs['second']['maxlength'] # overrides['second']['maxlength']\n last_maxlength = expected_attrs['last']['maxlength'] # overrides['last']['maxlength']\n last_size = expected_attrs['last']['size'] # overrides['last']['size']\n\n self.assertEqual(first_maxlength, result_fields['first'].widget.attrs.get('maxlength', None))\n self.assertEqual(first_size, result_fields['first'].widget.attrs.get('size', None))\n self.assertEqual(second_maxlength, result_fields['second'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_maxlength, result_fields['last'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_size, result_fields['last'].widget.attrs.get('size', None))\n for key, val in expected_attrs.items():\n self.assertEqual(val, result_attrs[key])\n self.assertDictEqual(expected_attrs, result_attrs)\n\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del 
self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n self.form.get_overrides = original_get_overrides", "def test__authenticate_with_custom_port(self):\n get_cloud_config_mock = [\n \"proxmox.connection.url\",\n \"9999\",\n \"fakeuser\",\n \"secretpassword\",\n True,\n ]\n requests_post_mock = MagicMock()\n with patch(\n \"salt.config.get_cloud_config_value\",\n autospec=True,\n side_effect=get_cloud_config_mock,\n ), patch(\"requests.post\", requests_post_mock):\n proxmox._authenticate()\n requests_post_mock.assert_called_with(\n \"https://proxmox.connection.url:9999/api2/json/access/ticket\",\n verify=True,\n data={\"username\": (\"fakeuser\",), \"password\": \"secretpassword\"},\n )", "def test_function_callparams(self):\n\n @Configurable(conf=category('', Parameter('test', value=True)))\n def twist(test=None):\n return test\n\n value = twist()\n\n self.assertTrue(value)\n\n value = twist(False)\n\n self.assertFalse(value)\n\n Configurable.get_annotations(twist)[0].conf['']['test'].value = 1\n\n value = twist()\n\n self.assertEqual(value, 1)", "def test_bad_ext_app_setting(self, mock_settings):\n lang, _ = Language.objects.get_or_create(name=\"Test Language\", code=\"text-x-lang\")\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": None})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": {}})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": ()})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": \" \"})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1.0})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)", "def default_load_all(cls, values): # noqa: N805\n if not any(values.values()):\n values[\"eia\"] = EiaSettings()\n values[\"epacems\"] = EpaCemsSettings()\n values[\"ferc1\"] = Ferc1Settings()\n values[\"ferc714\"] = Ferc714Settings()\n values[\"glue\"] = GlueSettings()\n\n return values", "async def test_plenticore_async_setup_g1(\n hass: HomeAssistant,\n mock_config_entry: MockConfigEntry,\n mock_apiclient: ApiClient,\n) -> None:\n mock_apiclient.get_settings = AsyncMock(\n return_value={\"scb:network\": [SettingsData({\"id\": \"Hostname\"})]}\n )\n mock_apiclient.get_setting_values = AsyncMock(\n # G1 model has the entry id \"Hostname\"\n return_value={\n \"devices:local\": {\n \"Properties:SerialNo\": \"12345\",\n \"Branding:ProductName1\": \"PLENTICORE\",\n \"Branding:ProductName2\": \"plus 10\",\n \"Properties:VersionIOC\": \"01.45\",\n \"Properties:VersionMC\": \"01.46\",\n },\n \"scb:network\": {\"Hostname\": \"scb\"},\n }\n )\n\n mock_config_entry.add_to_hass(hass)\n\n assert await 
hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n plenticore = hass.data[DOMAIN][mock_config_entry.entry_id]\n\n assert plenticore.device_info == DeviceInfo(\n configuration_url=\"http://192.168.1.2\",\n identifiers={(DOMAIN, \"12345\")},\n manufacturer=\"Kostal\",\n model=\"PLENTICORE plus 10\",\n name=\"scb\",\n sw_version=\"IOC: 01.45 MC: 01.46\",\n )", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {0: ['cdhit_test_seqs_0'],\r\n 1: ['cdhit_test_seqs_1'],\r\n 2: ['cdhit_test_seqs_2'],\r\n 3: ['cdhit_test_seqs_3'],\r\n 4: ['cdhit_test_seqs_4'],\r\n 5: ['cdhit_test_seqs_5'],\r\n 6: ['cdhit_test_seqs_6'],\r\n 7: ['cdhit_test_seqs_7'],\r\n 8: ['cdhit_test_seqs_8'],\r\n 9: ['cdhit_test_seqs_9']}\r\n\r\n app = CdHitOtuPicker(params={})\r\n obs = app(self.tmp_seq_filepath1)\r\n self.assertEqual(obs, exp)", "def test_call_default_params(self):\r\n\r\n exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),\r\n 'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),\r\n '2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),\r\n 'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),\r\n }\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath,\r\n self.tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)", "def test_get_user_settings(client, jwt, session, keycloak_mock, monkeypatch): # pylint:disable=unused-argument\n user_model = factory_user_model(user_info=TestUserInfo.user_test)\n contact = factory_contact_model()\n contact_link = ContactLinkModel()\n contact_link.contact = contact\n contact_link.user = user_model\n contact_link.commit()\n kc_id = user_model.keycloak_guid\n\n claims = copy.deepcopy(TestJwtClaims.updated_test.value)\n claims['sub'] = str(kc_id)\n claims['idp_userid'] = str(user_model.idp_userid)\n patch_token_info(claims, monkeypatch)\n\n OrgService.create_org(TestOrgInfo.org_branch_name, user_id=user_model.id)\n\n # post token with updated claims\n headers = factory_auth_header(jwt=jwt, claims=claims)\n rv = client.get(f'/api/v1/users/{kc_id}/settings', headers=headers, content_type='application/json')\n item_list = rv.json\n account = next(obj for obj in item_list if obj['type'] == 'ACCOUNT')\n assert account['accountType'] == 'BASIC'\n assert account['additionalLabel'] == TestOrgInfo.org_branch_name.get('branchName')\n assert rv.status_code == http_status.HTTP_200_OK\n assert schema_utils.validate(item_list, 'user_settings_response')[0]\n assert account['productSettings'] == f'/account/{account[\"id\"]}/restricted-product'\n\n kc_id_no_user = TestUserInfo.user1.get('keycloak_guid')\n claims = copy.deepcopy(TestJwtClaims.updated_test.value)\n claims['sub'] = str(kc_id_no_user)\n patch_token_info(claims, monkeypatch)\n # post token with updated claims\n headers = factory_auth_header(jwt=jwt, claims=claims)\n rv = client.get(f'/api/v1/users/{kc_id_no_user}/settings', headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_200_OK\n assert schema_utils.validate(item_list, 'user_settings_response')[0]\n item_list = rv.json\n account = next((obj for obj in item_list if obj['type'] == 'ACCOUNT'), None)\n assert account is None\n user_profile = next(obj for obj in item_list if obj['type'] == 'USER_PROFILE')\n assert '/userprofile' in user_profile.get('urlpath')", "async def test_setup_multiple(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n bootstrap: Bootstrap,\n) -> None:\n\n 
await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n\n assert ufp.entry.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert ufp.entry.unique_id == ufp.api.bootstrap.nvr.mac\n\n nvr = bootstrap.nvr\n nvr._api = ufp.api\n nvr.mac = \"A1E00C826983\"\n nvr.id\n ufp.api.get_nvr = AsyncMock(return_value=nvr)\n\n with patch(\n \"homeassistant.components.unifiprotect.utils.ProtectApiClient\"\n ) as mock_api:\n mock_config = MockConfigEntry(\n domain=DOMAIN,\n data={\n \"host\": \"1.1.1.1\",\n \"username\": \"test-username\",\n \"password\": \"test-password\",\n \"id\": \"UnifiProtect\",\n \"port\": 443,\n \"verify_ssl\": False,\n },\n version=2,\n )\n mock_config.add_to_hass(hass)\n\n mock_api.return_value = ufp.api\n\n await hass.config_entries.async_setup(mock_config.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert mock_config.unique_id == ufp.api.bootstrap.nvr.mac", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'corpus_subset,'\n strategy1.probability = 1\n strategy1.engine = 'afl'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_entities__FieldCustomization__default_value__1(address_book):\n fc = IFieldCustomization(address_book)\n assert u'Time zone' == fc.default_value(IAddressBook['time_zone'], 'label')", "def test_sanity(self, mock_provider):\n p = mock_provider()\n assert p.metadata == {'base_url': 'https://api.mock.com',\n 'provider_name': 'mock_provider',\n 'site_url': 'https://www.mock.com'}\n assert p.arguments == {\n 'not_required': {\n 'oneOf': [\n {'items': {'type': 'string'}, 'minItems': 1, 'type': 'array', 'uniqueItems': True},\n {'type': 'string'}\n ]\n },\n 'required': {'type': 'string'},\n 'message': {'type': 'string'},\n 'option_with_default': {'type': 'string'}\n }\n\n assert p.required == ['required']\n rsp = p.notify(**self.valid_data)\n assert isinstance(rsp, Response)\n assert not rsp.errors\n assert rsp.raise_on_errors() is None\n assert repr(rsp) == '<Response,provider=Mock_provider,status=success>'\n assert repr(p) == '<Provider:[Mock_provider]>'", "def test_get_setting(monkeypatch):\n resp = str(uuid.uuid4())\n arg = str(uuid.uuid4())\n kwarg = str(uuid.uuid4())\n get_secret = Mock(return_value=resp)\n monkeypatch.setattr(\"lambdautils.state.get_secret\", get_secret)\n resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)\n assert resp2 == resp\n get_secret.assert_called_with(arg, kwarg=kwarg)", "def _validate_provider(self, name_or_uuid, **kwargs):\n found = self.client._provider_tree.data(name_or_uuid)\n # If kwargs provided, their names indicate ProviderData attributes\n for attr, expected in kwargs.items():\n try:\n self.assertEqual(getattr(found, attr), expected)\n except AttributeError:\n self.fail(\"Provider with name or UUID %s doesn't have \"\n \"attribute %s (expected value: %s)\" %\n (name_or_uuid, attr, expected))", "def test_set(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n self.assertIsNone(\n 
ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )\n\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 1)\n obj = AppSetting.objects.get(name=setting_name, user=self.user)\n self.assertEqual(obj.get_value(), 'value')\n self.assertIsNone(\n ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )", "def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def valid_config(hass: HomeAssistant, requests_mock):\n requests_mock.get(\n \"https://api.vultr.com/v1/account/info?api_key=ABCDEFG1234567\",\n text=load_fixture(\"account_info.json\", \"vultr\"),\n )\n\n with patch(\n \"vultr.Vultr.server_list\",\n return_value=json.loads(load_fixture(\"server_list.json\", \"vultr\")),\n ):\n # Setup hub\n vultr.setup(hass, VALID_CONFIG)", "def test_patch_hyperflex_ucsm_config_policy(self):\n pass", "def test_claims_supported_not_set(self):\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], [])", "def test_bad_custom_params(self):\r\n bad_custom_params = ['test_custom_params: test_custom_param_value']\r\n self.xmodule.custom_parameters = bad_custom_params\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n with self.assertRaises(LTIError):\r\n self.xmodule.get_input_fields()", "def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }", "def test_default_customer_app_map_search(self):\n pass", "def test_inputs_suitor_prefs(resident_names, hospital_names, capacities, seed):\n\n _, _, match = _make_match(resident_names, hospital_names, capacities, seed)\n\n assert match._check_suitor_prefs()\n\n match.suitors[0].pref_names = [1, 2, 3]\n\n with pytest.raises(Exception):\n match._check_suitor_prefs()", "def test__VerificationFieldPlatform__value():\n for instance in VerificationFieldPlatform.INSTANCES.values():\n vampytest.assert_instance(instance.value, 
VerificationFieldPlatform.VALUE_TYPE)" ]
[ "0.7055314", "0.6939393", "0.66155446", "0.63423544", "0.576975", "0.5697624", "0.5679179", "0.55829513", "0.5382714", "0.5345334", "0.5325174", "0.53106457", "0.5242109", "0.52059597", "0.5203102", "0.5186651", "0.5169315", "0.51328266", "0.51315385", "0.5126731", "0.5115003", "0.50994205", "0.5093414", "0.50820154", "0.5074523", "0.5061159", "0.5037727", "0.50352234", "0.50327957", "0.5018553", "0.50175124", "0.50126547", "0.5012161", "0.50014883", "0.5001277", "0.4996847", "0.49957845", "0.4989725", "0.49859405", "0.4979983", "0.49636325", "0.49628103", "0.49526268", "0.49485505", "0.49423316", "0.49180043", "0.49076396", "0.49003863", "0.4899193", "0.48928812", "0.48889008", "0.48886302", "0.48882446", "0.4886075", "0.48834777", "0.48825428", "0.4880589", "0.4880027", "0.48777232", "0.48777232", "0.48669815", "0.4863833", "0.48586407", "0.48578447", "0.48539978", "0.48506185", "0.4849006", "0.48395336", "0.4838721", "0.48384526", "0.4837042", "0.48263034", "0.48259896", "0.48249537", "0.48199266", "0.48183042", "0.48163012", "0.48143882", "0.4813749", "0.4812405", "0.4809102", "0.48063162", "0.4802421", "0.48022398", "0.48009628", "0.4797333", "0.4797317", "0.4795819", "0.47936177", "0.47829607", "0.4781316", "0.47804084", "0.47801426", "0.47722742", "0.47705948", "0.4768243", "0.4761191", "0.4757908", "0.47506016", "0.47503716" ]
0.50434846
26
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided it should use the default value provided by the configuration.
def test_register_sapsf_with_value_default(self): # Mock the call to the SAP SuccessFactors OData user endpoint ODATA_USER_URL = ( 'http://api.successfactors.com/odata/v2/User(userId=\'myself\')' '?$select=firstName,country,lastName,defaultFullName,email' ) def user_callback(request, _uri, headers): auth_header = request.headers.get('Authorization') assert auth_header == 'Bearer faketoken' return ( 200, headers, json.dumps({ 'd': { 'username': 'jsmith', 'firstName': 'John', 'lastName': 'Smith', 'defaultFullName': 'John Smith', 'country': 'Australia' } }) ) httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback) provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', } self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps(provider_settings), default_email='[email protected]' ) self.USER_EMAIL = '[email protected]' self._test_register()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_metadata_present_empty_value_override(self):\n\n value_map = {'country': {}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def set_provider_defaults(provider: str, config: Dict[str, Any]) -> None:\n\n class SetProviderDefaults:\n @st.hookimpl\n def get_provider_config(self, name, params, registry):\n if name != provider:\n return None\n conf = config.copy()\n conf.update(params)\n return conf\n\n st.registry.register(SetProviderDefaults())", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n 
num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def defaults_provider():\n return getattr(defaults_provider, 'overrides', {})", "def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")", "def setup_provider(self):\n pass", "def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"value\": \"There is a product bought\"}',\n response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/?format=json\", data={\"PLN\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": \"100\"}', response.content)\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn('{\"PLN\": \"100\"}', response.content)", "def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"", "def test_mocked_api_set_new_value(self):\n c = Client()\n response = c.get(self.patch_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 157}',\n response.content)\n response = c.patch(\n self.patch_url, data={\"PLN\": 20, \"EURO\": 20})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)\n response = c.get(self.patch_url)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)", "def test_force_override(self):\n DummyLoader.register()\n try:\n DummyLoader.register(override=True)\n except ValueError:\n self.fail('Can not register if passing `override` set to `True`.')", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def mock_config(monkeypatch: Any) -> None:\n monkeypatch.setattr(\"catapi.config.ENABLE_FOO\", \"true\")\n monkeypatch.setattr(\"catapi.config.ENABLE_BAR\", \"false\")", "def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n 
custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"", "def test_config_device_init_with_defaults(get_config, monkeypatch):\n notbase_config = {'not_presented': 1}\n monkeypatch.setattr(DeviceConfig, 'minimal_essential_conf', notbase_config)\n cfg = get_config(DeviceConfig, base_config)\n\n assert isinstance(cfg, DeviceConfig), 'wrong class'\n assert cfg.data == notbase_config, 'bad config loaded'", "def test_entities__FieldCustomization__set_value__2(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')", "def mocked_config_file_custom_provider():\n data = \"\"\"[YESSSSMS]\nLOGIN = 06501234567\nPASSWD = MySecre3tPassw0rd\nDEFAULT_TO = +43664123123123\n# MVNO = FANTASYMOBILE\n[YESSSSMS_PROVIDER_URLS]\nLOGIN_URL = mock://kontomanager.at/index.php\nLOGOUT_URL = mock://kontomanager.at/index.php?dologout=2\nKONTOMANAGER_URL = mock://kontomanager.at/kundendaten.php\nWEBSMS_FORM_URL = mock://kontomanager.at/websms.php\nSEND_SMS_URL = mock://kontomanager.at/websms_send.php\n\"\"\"\n with mock.patch(\n \"configparser.open\",\n # \"builtins.open\",\n mock.mock_open(read_data=data),\n ):\n yield", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {'0': 'R27DLI_4812',\r\n '1': 'U1PLI_7889',\r\n '2': 'W3Cecum_4858',\r\n '3': 'R27DLI_3243',\r\n }\r\n app = GenericRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)\r\n self.assertEqual(obs, exp)", "def test_resolution_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ResolutionAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['resolution'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 
'resolution',\n ','.join(self.new['resolution']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['resolution'])", "def set_provider(self, provider):\n \n check = self.check_provider(provider)\n if check is not None:\n self.default_provider = provider\n else:\n return None", "def test_country_overrides(self):\n # Retrieve the registration form description\n with override_settings(REGISTRATION_EXTRA_FIELDS={\"country\": \"required\"}):\n response = self.client.get(self.url)\n self.assertHttpOK(response)\n\n self.assertContains(response, 'Kosovo')", "def test_configure():\n\n configs = DTO()\n configs.update(fake_name='fake_name', fake_id='fake_id',\n fake_number=33, fake_value='fake_value')\n application_services.configure(configs)\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in\n ['FAKE_NAME', 'FAKE_ID', 'FAKE_NUMBER', 'FAKE_VALUE'])\n\n assert not any(name in app_configs for name in\n ['fake_name', 'fake_id', 'fake_number', 'fake_value'])", "def test_entities__NoFieldCustomization__set_value__1(root_folder):\n nfc = IFieldCustomization(root_folder)\n with pytest.raises(NotImplementedError):\n nfc.set_value(IAddressBook['time_zone'], u'label', u'foo')", "def setup(override: str=''):\n\n try:\n base_config_data = open(BASE_CONFIGURATION).read()\n base_config = json.loads(base_config_data)\n except FileNotFoundError:\n logging.error('Base configuration file in config/base.json not found.')\n raise RuntimeError('Base configuration file not found.')\n\n # Check if override is required\n if override is not '':\n try:\n override_config_data = open('config/{0}'.format(override)).read()\n override_config = json.loads(override_config_data)\n except FileNotFoundError:\n logging.error('Override configuration file config/{0} not found.')\n raise RuntimeError('Invalid configuraiton override file.')\n\n # Update base config with override parameters\n base_config = update(base_config, override_config)\n\n # Add to parameters\n global Parameters\n Parameters.__dict__.update(base_config)", "def test_raises_set_alt_data(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. 
\")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def setdefault(self, value: Any) -> None:\n self.default_factory = value \n return", "def __init__(self, allow_replace=False):\n self.providers = {}\n self.allow_replace = allow_replace", "def test_provided_data_takes_precedence_over_environ(self, mock_provider, monkeypatch):\n p = mock_provider()\n prefix = f'mock_'\n monkeypatch.setenv(f'{prefix}{p.provider_name}_required'.upper(), 'foo')\n rsp = p.notify(required='bar', env_prefix=prefix)\n assert rsp.status == 'success'\n assert rsp.data['required'] == 'bar'", "def setdefault(self, value: Any) -> None: # type: ignore\n self.default_factory = value \n return", "def test_get_with_default(self):\n self.assertEqual(self.config.get('basic','salutation'),None)\n self.assertEqual(self.config.get('basic','salutation','bonjour'),\n 'bonjour')", "def post_init(cr, registry):\n from ecore import SUPERUSER_ID\n from ecore.addons.base.ir.ir_config_parameter import _default_parameters\n ICP = registry['ir.config_parameter']\n for k, func in _default_parameters.items():\n v = ICP.get_param(cr, SUPERUSER_ID, k)\n _, g = func()\n ICP.set_param(cr, SUPERUSER_ID, k, v, g)", "def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)", "def test_defaults_overrides_with_settings(settings):\n\n settings.MARION_DOCUMENT_ISSUER_CHOICES_CLASS = (\n \"howard.documents.DocumentIssuerChoices\"\n )\n settings.MARION_DOCUMENTS_ROOT = Path(\"/tmp/documents/abc\")\n settings.MARION_DOCUMENTS_TEMPLATE_ROOT = Path(\"howard/documents/abc\")\n\n # Force module reload to take into account setting override as it is loaded\n # very early in the stack\n importlib.reload(defaults)\n\n assert (\n defaults.DOCUMENT_ISSUER_CHOICES_CLASS\n == \"howard.documents.DocumentIssuerChoices\"\n )\n assert defaults.DOCUMENTS_ROOT == Path(\"/tmp/documents/abc\")\n assert defaults.DOCUMENTS_TEMPLATE_ROOT == Path(\"howard/documents/abc\")", "def test_get_setting(monkeypatch):\n random_key = uuid()\n default_value, actual_value = \"foo\", \"bar\"\n assert helpers.get_setting(random_key, default_value) == default_value\n monkeypatch.setenv(random_key, actual_value)\n assert helpers.get_setting(random_key, default_value) == actual_value", "def test_setting_override(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n context = context_processors.decide_base_template(request)\n self.assertEqual(context['base_template'], \"test.html\")", "def test_startup_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # make sure some value isnt the default value\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n self.driver._protocol._param_dict.update(\"baz=30\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 30)\n\n # pretend to manually adjust a few things:\n 
self.driver._protocol._param_dict.update(\"foo=1000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 1000)\n self.driver._protocol._param_dict.update(\"bar=1500\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 1500)\n self.driver._protocol._param_dict.update(\"baz=2000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n\n # pretend to go through the motions of a startup sequence\n self.driver.set_init_params({'foo': 100, \"bar\": 150, \"baz\": 200})\n\n # Now a virtual method in the protocol that asserts when not implemented\n # behavior proven in derived protocol classes\n # self.driver.apply_startup_params()\n\n # check the values on the other end\n # running_config = self.driver._protocol.get_cached_config()\n\n # confirm that the default values were set back appropriately.\n # self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 100)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 150)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bat\"), 40)\n\n ##### Integration tests for startup config in the SBE37 integration suite", "def set_values(self):\n super(ResConfigInherit, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param(\n 'sale_stock_restrict.product_restriction', self.product_restriction)\n self.env['ir.config_parameter'].sudo().set_param(\n 'sale_stock_restrict.check_stock', self.check_stock)", "def test__authenticate_with_custom_port(self):\n get_cloud_config_mock = [\n \"proxmox.connection.url\",\n \"9999\",\n \"fakeuser\",\n \"secretpassword\",\n True,\n ]\n requests_post_mock = MagicMock()\n with patch(\n \"salt.config.get_cloud_config_value\",\n autospec=True,\n side_effect=get_cloud_config_mock,\n ), patch(\"requests.post\", requests_post_mock):\n proxmox._authenticate()\n requests_post_mock.assert_called_with(\n \"https://proxmox.connection.url:9999/api2/json/access/ticket\",\n verify=True,\n data={\"username\": (\"fakeuser\",), \"password\": \"secretpassword\"},\n )", "def test_call_default_params(self):\r\n\r\n exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),\r\n 'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),\r\n '2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),\r\n 'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),\r\n }\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath,\r\n self.tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)", "def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {0: ['cdhit_test_seqs_0'],\r\n 1: ['cdhit_test_seqs_1'],\r\n 2: ['cdhit_test_seqs_2'],\r\n 3: ['cdhit_test_seqs_3'],\r\n 4: ['cdhit_test_seqs_4'],\r\n 5: ['cdhit_test_seqs_5'],\r\n 6: ['cdhit_test_seqs_6'],\r\n 7: ['cdhit_test_seqs_7'],\r\n 8: ['cdhit_test_seqs_8'],\r\n 9: ['cdhit_test_seqs_9']}\r\n\r\n app = CdHitOtuPicker(params={})\r\n obs = app(self.tmp_seq_filepath1)\r\n self.assertEqual(obs, exp)", 
"def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def setUp(self):\n super().setUp()\n self.mock_requests(get_geocode_data=copy.deepcopy(test_constants.GET_ADDRESS_CANDIDATES_API_MOCK), \n get_data=copy.deepcopy(test_constants.GET_LIBRARY_API_MOCK))", "def test_update_reg_ex_config(self):\n pass", "def test_returns_configured_apiurl_over_default(self):\n arguments = {'--api-url': None}\n config = {'api_url': 'configured_stuff'}\n result = get_api_url(arguments, config)\n self.assertEqual(result, 'configured_stuff')\n self.mock_sanitize_host.assert_not_called()", "def test_entities__FieldCustomization__default_value__1(address_book):\n fc = IFieldCustomization(address_book)\n assert u'Time zone' == fc.default_value(IAddressBook['time_zone'], 'label')", "def _override(config, overrides):\n for key, value in overrides.iteritems():\n if key not in config:\n raise KeyError(\"Unrecognized parameter: %s\" % key)\n config[key] = value", "def __init__(__self__, *,\n override_settings: Optional[pulumi.Input['UpgradeOverrideSettingsArgs']] = None):\n if override_settings is not None:\n pulumi.set(__self__, \"override_settings\", override_settings)", "def initialiseOverride(self):\n overrideConf = self.overrideConf\n overrideParams = {\n \"command\" : None,\n \"option\" : None,\n \"phedex-node\" : None,\n \"lfn-prefix\" : None,\n }\n\n try:\n overrideParams['command'] = overrideConf['command']\n overrideParams['phedex-node'] = overrideConf['phedex-node']\n overrideParams['lfn-prefix'] = overrideConf['lfn-prefix']\n except Exception as ex:\n msg = \"Unable to extract Override parameters from config:\\n\"\n msg += str(overrideConf)\n raise StageOutInitError(msg)\n if 'option' in overrideConf:\n if len(overrideConf['option']) > 0:\n overrideParams['option'] = overrideConf['option']\n else:\n overrideParams['option'] = \"\"\n\n msg = \"=======StageIn Override Initialised:================\\n\"\n for key, val in viewitems(overrideParams):\n msg += \" %s : %s\\n\" % (key, val)\n msg += \"=====================================================\\n\"\n print(msg)\n self.fallbacks = []\n self.fallbacks.append(overrideParams)\n return", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def test_good_custom_params(self):\r\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n self.xmodule.get_input_fields()\r\n self.xmodule.oauth_params.assert_called_with(\r\n {u'custom_test_custom_params': u'test_custom_param_value'},\r\n 'test_client_key', 'test_client_secret'\r\n )", "def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan 
to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }", "def test_good_custom_params(self):\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\n self.xmodule.oauth_params = Mock()\n self.xmodule.get_input_fields()\n self.xmodule.oauth_params.assert_called_with(\n {'custom_test_custom_params': 'test_custom_param_value'},\n 'test_client_key', 'test_client_secret'\n )", "def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')", "def test_custom_configuration_updated(self):\n component_protocol_id = ComponentId(\n ComponentType.PROTOCOL, self.new_protocol_id\n )\n component_contract_id = ComponentId(\n ComponentType.CONTRACT, self.new_contract_id\n )\n component_connection_id = ComponentId(\n ComponentType.CONNECTION, self.new_connection_id\n )\n component_skill_id = ComponentId(ComponentType.SKILL, self.new_skill_id)\n\n assert (\n self.agent_config.component_configurations[component_protocol_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_contract_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_connection_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_skill_id]\n == self.expected_custom_component_configuration\n )", "def test_patch_hyperflex_proxy_setting_policy(self):\n pass", "def test_server_override_general(self):\n # Sanity check our override values do not overlap\n self.assertNotEqual(CONFIG_DATA[\"ConcurrentWorkers\"],\n CONFIG_DATA[\"OverrideConcurrentWorkers\"])\n self.assertNotEqual(CONFIG_DATA[\"SaveTimeoutMinutes\"],\n CONFIG_DATA[\"OverrideSaveTimeoutMinutes\"])\n self.assertNotEqual(CONFIG_DATA[\"RetainImageMinutes\"],\n CONFIG_DATA[\"OverrideRetainImageMinutes\"])\n self.assertNotEqual(CONFIG_DATA[\"Region\"],\n CONFIG_DATA[\"OverrideRegion\"])\n config_data = imageroller.main.read_config(\n self._cmd_args,\n imageroller.test.get_config_parser(\n self._server_valid_override))\n # Verify default disabled server is not included\n self.assertNotIn(\n CONFIG_DATA[\"OverrideNotExistFQDN\"],\n [server_data.name for server_data in\n config_data.server_data])\n # Sanity check we have every server's config we expect to have\n self.assertSetEqual(\n set([server_data.name for server_data in\n config_data.server_data]),\n {CONFIG_DATA[\"OverrideWorkersFQDN\"],\n CONFIG_DATA[\"OverrideSaveTimeoutFQDN\"],\n CONFIG_DATA[\"OverrideRetainImageFQDN\"],\n CONFIG_DATA[\"OverrideRegionFQDN\"]},\n )\n # Smoke test they are all enabled\n self.assertTrue(all([server_data.enabled\n for server_data in\n config_data.server_data]))", "def used_implementation(self, impl: str, value):", "def test_value_base_metadata_param(self):\n value = { 'color': 'blue' }\n base_meta = BaseMetadata(api_client=self.IDS_SYS_CLIENT, value=value)\n self.assertEqual(base_meta.value, value)", "def setUp(self):\n super().setUp()\n\n self.provider = 
Provider.objects.filter(type=Provider.PROVIDER_OCP).first()\n\n ocp_metric = metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR\n ocp_source_type = Provider.PROVIDER_OCP\n tiered_rates = [{\"unit\": \"USD\", \"value\": 0.22}]\n self.ocp_data = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": ocp_source_type,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}, \"tiered_rates\": tiered_rates}],\n \"currency\": \"USD\",\n }\n self.basic_model = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}}],\n \"currency\": \"USD\",\n }", "def test_apply_startup_params(self):\n self.assertRaises(NotImplementedException,\n self.driver.apply_startup_params)", "def testDefaults(self, widget):\n assert isinstance(widget.highlight, PythonHighlighter)\n assert isinstance(widget.parameter_dict, dict)\n assert isinstance(widget.pd_parameter_dict, dict)\n\n assert len(widget.model) == 6\n assert \"filename\" in widget.model.keys()\n assert \"overwrite\" in widget.model.keys()\n assert \"description\" in widget.model.keys()\n assert \"parameters\" in widget.model.keys()\n assert \"pd_parameters\" in widget.model.keys()\n assert \"text\" in widget.model.keys()", "def _settings(self, library):\n if UserRegistration.LIBRARY is not None:\n if UserRegistration.LIBRARY == \"requests\":\n self._set_requests()\n elif UserRegistration.LIBRARY == \"urllib2\":\n self._set_urllib2()\n else:\n raise Exceptions.InvalidLibrary(UserRegistration.LIBRARY)\n else:\n if library == \"requests\":\n self._set_requests()\n elif library == \"urllib2\":\n self._set_urllib2()\n else:\n raise Exceptions.InvalidLibrary(library)", "def default_load_all(cls, values): # noqa: N805\n if not any(values.values()):\n values[\"eia\"] = EiaSettings()\n values[\"epacems\"] = EpaCemsSettings()\n values[\"ferc1\"] = Ferc1Settings()\n values[\"ferc714\"] = Ferc714Settings()\n values[\"glue\"] = GlueSettings()\n\n return values", "def _default_value(self, addr, size, name=None, inspect=True, events=True, key=None, **kwargs):\n pass", "def test_adapter_opts_set(self):\n conn = self._get_conn()\n\n discovery = {\n \"versions\": {\n \"values\": [\n {\n \"status\": \"stable\",\n \"updated\": \"2019-06-01T00:00:00Z\",\n \"media-types\": [\n {\n \"base\": \"application/json\",\n \"type\": \"application/vnd.openstack.heat-v2+json\", # noqa: E501\n }\n ],\n \"id\": \"v2.0\",\n \"links\": [\n {\n \"href\": \"https://example.org:8888/heat/v2\",\n \"rel\": \"self\",\n }\n ],\n }\n ]\n }\n }\n self.register_uris(\n [\n dict(\n method='GET',\n uri='https://example.org:8888/heat/v2',\n json=discovery,\n ),\n dict(\n method='GET',\n uri='https://example.org:8888/heat/v2/foo',\n json={'foo': {}},\n ),\n ]\n )\n\n adap = conn.orchestration\n self.assertEqual('SpecialRegion', adap.region_name)\n self.assertEqual('orchestration', adap.service_type)\n self.assertEqual('internal', adap.interface)\n self.assertEqual(\n 'https://example.org:8888/heat/v2', adap.endpoint_override\n )\n\n adap.get('/foo')\n self.assert_calls()", "def test_function_callparams(self):\n\n @Configurable(conf=category('', Parameter('test', value=True)))\n def twist(test=None):\n return test\n\n value = 
twist()\n\n self.assertTrue(value)\n\n value = twist(False)\n\n self.assertFalse(value)\n\n Configurable.get_annotations(twist)[0].conf['']['test'].value = 1\n\n value = twist()\n\n self.assertEqual(value, 1)", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'random_max_len,value_profile,'\n strategy1.probability = 1\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_real_value(self):\n setting_model = Setting(python_type='list', dry_value='')\n self.assertEqual(setting_model.value, [])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Lancelot,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Lancelot', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Robin,Patsy',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Robin', 'Patsy'])", "def test_setup_parameters(self, mock_homekit):\n self.assertTrue(setup.setup_component(\n self.hass, 'homekit', CONFIG))\n\n self.assertEqual(mock_homekit.mock_calls,\n [call(self.hass, 11111),\n call().setup_bridge(b'987-65-432')])", "def test_get_with_None_value(self):\n self.assertEqual(self.config.get('none_types','some_value'),None)\n self.assertEqual(self.config.get('none_types','some_value','something'),'something')", "def test_get_with_empty_value(self):\n self.assertEqual(self.config.get('none_types','other_value'),None)\n self.assertEqual(self.config.get('none_types','other_value','something'),'something')", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def test_entities__FieldCustomization__set_value__1(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n assert u'Default time zone value 123' == fc.get_value(field, 'label')\n assert u'Default time zone value 123' == fc.query_value(field, 'label')", "def test_config_prefix_none_value(self, mock_get_secret):\n kwargs = {'config_prefix': None}\n\n ssm_backend = SystemsManagerParameterStoreBackend(**kwargs)\n\n assert ssm_backend.get_config(\"config\") is None\n mock_get_secret.assert_not_called()", "def test_call_alt_params(self):\r\n otu_picker = BlastOtuPicker({'max_e_value': 1e-30})\r\n expected = {}\r\n actual = otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected)\r\n\r\n self.otu_picker = BlastOtuPicker(\r\n {'max_e_value': 1e-3, 'Similarity': 0.90})\r\n expected_90 = {'ref1': ['s3', 's2', 's1'],\r\n 'ref2': ['s4'],\r\n 'ref3': ['s5'],\r\n 'ref4': ['s6']}\r\n actual = self.otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected_90)", "def test_default_adapter_opts(self):\n conn = self._get_conn()\n\n server_id = str(uuid.uuid4())\n server_name = 
self.getUniqueString('name')\n fake_server = fakes.make_fake_server(server_id, server_name)\n\n self.register_uris(\n [\n self.get_nova_discovery_mock_dict(),\n dict(\n method='GET',\n uri=self.get_mock_url(\n 'compute', 'public', append=['servers', 'detail']\n ),\n json={'servers': [fake_server]},\n ),\n ]\n )\n\n # Nova has empty adapter config, so these default\n adap = conn.compute\n self.assertIsNone(adap.region_name)\n self.assertEqual('compute', adap.service_type)\n self.assertEqual('public', adap.interface)\n self.assertIsNone(adap.endpoint_override)\n\n s = next(adap.servers())\n self.assertEqual(s.id, server_id)\n self.assertEqual(s.name, server_name)\n self.assert_calls()", "def testDefault():\n\n conf = naiveConf.NaiveConf(exampleConfFname)\n oldX = conf.x\n conf.default('x', None)\n conf.default('Z', 5)\n\n assert conf.x == oldX\n assert conf.Z == 5", "def test_prep_overrides(self):\n original_data = self.form.data\n test_data = original_data.copy()\n test_data._mutable = False\n self.form.data = test_data # copied only to allow tear-down reverting to original.\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n self.form.fields = test_fields # copied to allow tear-down reverting to original.\n original_get_overrides = self.form.get_overrides\n def replace_overrides(): return self.formfield_attrs_overrides\n self.form.get_overrides = replace_overrides\n original_alt_field_info = getattr(self.form, 'alt_field_info', None)\n self.form.alt_field_info = {}\n overrides = self.formfield_attrs_overrides.copy()\n DEFAULT = overrides.pop('_default_')\n expected_attrs = {}\n for name, field in test_fields.items():\n attrs = field.widget.attrs.copy()\n if isinstance(field.widget, (RadioSelect, CheckboxSelectMultiple, CheckboxInput, )):\n pass # update if similar section in prep_fields is updated.\n attrs.update(overrides.get(name, {}))\n # TODO: setup structure for using default or defined version for all CharFields.\n no_resize = overrides.get(name, {}).pop('no_size_override', False)\n no_resize = True if isinstance(field.widget, (HiddenInput, MultipleHiddenInput)) else no_resize\n if no_resize:\n expected_attrs[name] = attrs\n continue # None of the following size overrides are applied for this field.\n if isinstance(field.widget, Textarea):\n width_attr_name = 'cols'\n default = DEFAULT.get('cols', None)\n display_size = attrs.get('cols', None)\n if 'rows' in DEFAULT:\n height = attrs.get('rows', None)\n height = min((DEFAULT['rows'], int(height))) if height else DEFAULT['rows']\n attrs['rows'] = str(height)\n if default: # For textarea, we always override. 
The others depend on different conditions.\n display_size = display_size or default\n display_size = min((int(display_size), int(default)))\n elif issubclass(field.__class__, CharField):\n width_attr_name = 'size' # 'size' is only valid for input types: email, password, tel, text\n default = DEFAULT.get('size', None) # Cannot use float(\"inf\") as an int.\n display_size = attrs.get('size', None)\n else: # This field does not have a size setting.\n width_attr_name, default, display_size = None, None, None\n input_size = attrs.get('maxlength', None)\n possible_size = [int(ea) for ea in (display_size or default, input_size) if ea]\n # attrs['size'] = str(int(min(float(display_size), float(input_size)))) # Can't use float(\"inf\") as an int.\n if possible_size and width_attr_name:\n attrs[width_attr_name] = str(min(possible_size))\n expected_attrs[name] = attrs\n # Expected:\n # formfield_attrs_overrides = {\n # '_default_': {'size': 15, 'cols': 20, 'rows': 4, },\n # 'first': {'maxlength': 191, 'size': 20, },\n # 'second': {'maxlength': 2, }, # 'size': 2,\n # 'last': {'maxlength': 2, 'size': 5, },\n # }\n result_fields = self.form.prep_fields()\n result_attrs = {name: field.widget.attrs.copy() for name, field in result_fields.items()}\n first_maxlength = expected_attrs['first']['maxlength'] # overrides['first']['maxlength']\n first_size = expected_attrs['first']['size'] # overrides['first']['size']\n second_maxlength = expected_attrs['second']['maxlength'] # overrides['second']['maxlength']\n last_maxlength = expected_attrs['last']['maxlength'] # overrides['last']['maxlength']\n last_size = expected_attrs['last']['size'] # overrides['last']['size']\n\n self.assertEqual(first_maxlength, result_fields['first'].widget.attrs.get('maxlength', None))\n self.assertEqual(first_size, result_fields['first'].widget.attrs.get('size', None))\n self.assertEqual(second_maxlength, result_fields['second'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_maxlength, result_fields['last'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_size, result_fields['last'].widget.attrs.get('size', None))\n for key, val in expected_attrs.items():\n self.assertEqual(val, result_attrs[key])\n self.assertDictEqual(expected_attrs, result_attrs)\n\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n self.form.get_overrides = original_get_overrides", "def setUpConfig(self):\n pass", "def _setupConfigAnnotation(self):\n annotations = IAnnotations(self)\n settings = annotations.get(\"PLOMINOFIELDCONFIG\", None)\n if not settings:\n annotations[\"PLOMINOFIELDCONFIG\"] = PersistentDict()", "def test_default_setting():\n assert get_default_password_validators() == []", "def default_load_all(cls, values): # noqa: N805\n if not any(values.values()):\n values[\"eia860\"] = Eia860Settings()\n values[\"eia861\"] = Eia861Settings()\n values[\"eia923\"] = Eia923Settings()\n\n return values", "def test_bad_ext_app_setting(self, mock_settings):\n lang, _ = Language.objects.get_or_create(name=\"Test Language\", code=\"text-x-lang\")\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": None})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": {}})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n 
self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": ()})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": \" \"})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1.0})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)", "def mock_config_entry() -> MockConfigEntry:\n return MockConfigEntry(\n title=\"homeassistant.github\",\n domain=DOMAIN,\n data={CONF_TAILNET: \"homeassistant.github\", CONF_API_KEY: \"tskey-MOCK\"},\n unique_id=\"homeassistant.github\",\n )", "def test_build__override_default_values(self, valid_service: fixture) -> None:\n service: Service = valid_service\n service.recurring_rides_enabled = True\n\n assert service.recurring_rides_enabled is True", "def _use_existing_configuration(self):\n HW_Init(self.ftdi, None)", "def test_set_property_success(self):\r\n self.config.option1 = 9001\r\n self.assertEqual(self.config.values['option1'], 9001)\r\n\r\n self.config.option2 = 'bar'\r\n self.assertEqual(self.config.values['option2'], 'bar')", "def test_configure(self):\r\n params = {\r\n 'test_str': 'This is only a test',\r\n 'test_empty': '',\r\n 'test_int': 12345,\r\n 'test_float': 123.45,\r\n 'test_dict': { 'test_key': 'test_val' },\r\n 'test_empty_dict': {},\r\n 'test_unicode': u'\\u2603 the snowman',\r\n 'test_none': None,\r\n 'test_boolean': False\r\n }\r\n\r\n for key, val in params.iteritems():\r\n\r\n # JSON-encode each parameter\r\n post_params = {key: json.dumps(val)}\r\n response = requests.put(self.url, data=post_params)\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # Check that the expected values were set in the configuration\r\n for key, val in params.iteritems():\r\n self.assertEqual(self.server.config.get(key), val)", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def set_default_parameters(self):\n super().set_default_parameters()", "def check_configuration(self):\n self.ensure_one()\n getattr(self, '%s_check_configuration' % self.provider, lambda: None)()", "def test_setup(self):\n self.assertIsNotNone(getattr(self, 'original_good_practice_attrs', None))\n self.assertIsNotNone(getattr(self, 'original_get_overrides', None))\n self.assertIsNotNone(getattr(self, 'original_get_alt_field_info', None))\n self.assertIsNone(getattr(self.form, 'is_prepared', None))\n self.assertNotIn('good_practice_attrs', self.form.has_call)\n self.assertNotIn('get_overrides', self.form.has_call)\n self.assertNotIn('get_alt_field_info', self.form.has_call)\n good_practice = self.form.good_practice_attrs()\n 
if self.good_practice == 'empty':\n self.assertEqual({}, good_practice)\n overrides = self.form.get_overrides()\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual({}, overrides)\n elif self.overrides_empty_or_skip == 'skip':\n self.assertEqual(self.no_resize_override(), overrides)\n if self.alt_info == 'empty':\n self.assertEqual({}, self.form.get_alt_field_info())\n self.assertIn('get_alt_field_info', self.form.has_call)\n self.assertEqual(self.form.get_alt_field_info.__name__, 'empty_get_alt_field_info')\n self.assertIn('good_practice_attrs', self.form.has_call)\n self.assertIn('get_overrides', self.form.has_call)\n self.form.has_call = []\n self.assertEqual(self.form.good_practice_attrs.__name__, 'empty_good_practice_attrs')\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual(self.form.get_overrides.__name__, 'empty_get_overrides')\n self.assertEqual(self.form.get_overrides.__name__, 'skip_get_overrides')\n request_type = 'POST' if self.get_initial_data() else 'GET'\n self.assertEqual(request_type, self.request.method)", "def test_all_providers(self, mock_provider, monkeypatch):\n\n def mock_providers():\n return ['mock']\n\n monkeypatch.setattr(notifiers, 'all_providers', mock_providers)\n\n assert 'mock' in notifiers.all_providers()", "async def test_form_zeroconf(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"192.168.1.5\",\n addresses=[\"192.168.1.5\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=1234,\n properties={},\n type=\"mock_type\",\n ),\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {}\n\n mock_pynut = _get_mock_pynutclient(\n list_vars={\"battery.voltage\": \"voltage\", \"ups.status\": \"OL\"}, list_ups=[\"ups1\"]\n )\n\n with patch(\n \"homeassistant.components.nut.PyNUTClient\",\n return_value=mock_pynut,\n ), patch(\n \"homeassistant.components.nut.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_USERNAME: \"test-username\", CONF_PASSWORD: \"test-password\"},\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert result2[\"title\"] == \"192.168.1.5:1234\"\n assert result2[\"data\"] == {\n CONF_HOST: \"192.168.1.5\",\n CONF_PASSWORD: \"test-password\",\n CONF_PORT: 1234,\n CONF_USERNAME: \"test-username\",\n }\n assert result2[\"result\"].unique_id is None\n assert len(mock_setup_entry.mock_calls) == 1" ]
[ "0.67629653", "0.6631736", "0.6281163", "0.5879345", "0.5806605", "0.55553335", "0.5497894", "0.546793", "0.54649425", "0.54343826", "0.5406209", "0.5285645", "0.5254529", "0.52485114", "0.5243045", "0.523051", "0.5200763", "0.5126939", "0.5116786", "0.5110514", "0.51018703", "0.50936586", "0.5077086", "0.5076001", "0.5073581", "0.50727195", "0.50708586", "0.50692844", "0.50624955", "0.5052041", "0.5021172", "0.50183326", "0.50082546", "0.500043", "0.5000132", "0.49956113", "0.49937412", "0.4990103", "0.49812508", "0.49778596", "0.49611852", "0.49542168", "0.49452066", "0.4942339", "0.493743", "0.49126965", "0.49079832", "0.49016252", "0.48924586", "0.48888457", "0.4888381", "0.48752713", "0.48727825", "0.4868186", "0.48667294", "0.48604178", "0.48601556", "0.48582733", "0.48572955", "0.48548788", "0.48472467", "0.48461244", "0.484604", "0.48370636", "0.48318803", "0.4822267", "0.4820576", "0.48165676", "0.48148856", "0.48134223", "0.4812294", "0.48113325", "0.48100907", "0.48098612", "0.48021337", "0.47943613", "0.4794053", "0.47894165", "0.47885188", "0.47859564", "0.4780754", "0.4779795", "0.47790393", "0.47751707", "0.4771662", "0.47695833", "0.4768789", "0.47676924", "0.47640416", "0.47610486", "0.47593313", "0.475495", "0.47531807", "0.47492298", "0.47492298", "0.47442392", "0.47431362", "0.4742358", "0.4742084", "0.47417626" ]
0.63661134
2
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mapping overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone).
def test_register_sapsf_metadata_present_override_relevant_value(self): value_map = {'country': {'Australia': 'NZ'}} expected_country = 'NZ' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', } if value_map: provider_settings['sapsf_value_mappings'] = value_map self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps(provider_settings) ) self._test_register(country=expected_country)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_metadata_present_empty_value_override(self):\n\n value_map = {'country': {}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_with_value_default(self):\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,country,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'country': 'Australia'\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)\n\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings),\n default_email='[email protected]'\n )\n self.USER_EMAIL = '[email protected]'\n self._test_register()", "def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n 
kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"value\": \"There is a product bought\"}',\n response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/?format=json\", data={\"PLN\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": \"100\"}', response.content)\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn('{\"PLN\": \"100\"}', response.content)", "def test_mocked_api_set_new_value(self):\n c = Client()\n response = c.get(self.patch_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 157}',\n response.content)\n response = c.patch(\n self.patch_url, data={\"PLN\": 20, \"EURO\": 20})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)\n response = c.get(self.patch_url)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)", "def test_entities__FieldCustomization__set_value__2(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')", "def set_provider_defaults(provider: str, config: Dict[str, Any]) -> None:\n\n class SetProviderDefaults:\n @st.hookimpl\n def get_provider_config(self, name, params, registry):\n if name != provider:\n return None\n conf = config.copy()\n conf.update(params)\n return conf\n\n st.registry.register(SetProviderDefaults())", "def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"", "def test_entities__NoFieldCustomization__set_value__1(root_folder):\n nfc = IFieldCustomization(root_folder)\n with pytest.raises(NotImplementedError):\n nfc.set_value(IAddressBook['time_zone'], u'label', u'foo')", "def setup_provider(self):\n pass", "def test_raises_set_alt_data(self):\n name, value = 
'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)", "def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)", "def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def setUp(self):\n super().setUp()\n self.mock_requests(get_geocode_data=copy.deepcopy(test_constants.GET_ADDRESS_CANDIDATES_API_MOCK), \n get_data=copy.deepcopy(test_constants.GET_LIBRARY_API_MOCK))", "def test_configure():\n\n configs = DTO()\n configs.update(fake_name='fake_name', fake_id='fake_id',\n fake_number=33, fake_value='fake_value')\n application_services.configure(configs)\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in\n ['FAKE_NAME', 'FAKE_ID', 'FAKE_NUMBER', 'FAKE_VALUE'])\n\n assert not any(name in app_configs for name in\n ['fake_name', 'fake_id', 'fake_number', 'fake_value'])", "def mock_config(monkeypatch: Any) -> None:\n monkeypatch.setattr(\"catapi.config.ENABLE_FOO\", \"true\")\n monkeypatch.setattr(\"catapi.config.ENABLE_BAR\", \"false\")", "def test_force_override(self):\n DummyLoader.register()\n try:\n DummyLoader.register(override=True)\n except ValueError:\n self.fail('Can not register if passing `override` set to `True`.')", "def test_provided_data_takes_precedence_over_environ(self, mock_provider, monkeypatch):\n p = mock_provider()\n prefix = f'mock_'\n monkeypatch.setenv(f'{prefix}{p.provider_name}_required'.upper(), 'foo')\n rsp = p.notify(required='bar', env_prefix=prefix)\n assert rsp.status == 'success'\n assert rsp.data['required'] == 'bar'", "def test_resolution_set_successful(self):\n # We create an instance of the 
panel so we can check existing values\n panel = ResolutionAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['resolution'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'resolution',\n ','.join(self.new['resolution']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['resolution'])", "def test_update_reg_ex_config(self):\n pass", "def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)", "def test_value_base_metadata_param(self):\n value = { 'color': 'blue' }\n base_meta = BaseMetadata(api_client=self.IDS_SYS_CLIENT, value=value)\n self.assertEqual(base_meta.value, value)", "def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_set_dict_value_2(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"B\")", "def test_country_overrides(self):\n # Retrieve the registration form description\n with override_settings(REGISTRATION_EXTRA_FIELDS={\"country\": \"required\"}):\n response = self.client.get(self.url)\n self.assertHttpOK(response)\n\n self.assertContains(response, 'Kosovo')", "def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"", "def testMapSetdefault(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap()\n with self.assertRaises(AssertionError):\n m.setdefault(1, data_types.BuildStats())\n with self.assertRaises(AssertionError):\n m.setdefault('1', 2)\n m.setdefault('1', data_types.BuildStats())\n self.assertEqual(m, {'1': data_types.BuildStats()})", "def __init__(self, allow_replace=False):\n self.providers = {}\n self.allow_replace = allow_replace", "def test_update_when_value_is_none(self, mock_req):\n self.setup_api(None, mock_req)\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n self.fake_delay(2)\n sensor.update()\n assert 
sensor.state is None", "def defaults_provider():\n return getattr(defaults_provider, 'overrides', {})", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def test_patch_user_identity_mapping(self):\n pass", "def test_good_custom_params(self):\r\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n self.xmodule.get_input_fields()\r\n self.xmodule.oauth_params.assert_called_with(\r\n {u'custom_test_custom_params': u'test_custom_param_value'},\r\n 'test_client_key', 'test_client_secret'\r\n )", "def test_good_custom_params(self):\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\n self.xmodule.oauth_params = Mock()\n self.xmodule.get_input_fields()\n self.xmodule.oauth_params.assert_called_with(\n {'custom_test_custom_params': 'test_custom_param_value'},\n 'test_client_key', 'test_client_secret'\n )", "def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def test_entities__FieldCustomization__set_value__3(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {'0': 'R27DLI_4812',\r\n '1': 'U1PLI_7889',\r\n '2': 'W3Cecum_4858',\r\n '3': 'R27DLI_3243',\r\n }\r\n app = GenericRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)\r\n self.assertEqual(obs, exp)", "async def test_no_value_template(hass: HomeAssistant, calls) -> None:\n await _register_fan_sources(hass)\n\n with assert_setup_component(1, \"fan\"):\n test_fan_config = {\n \"preset_mode_template\": \"{{ states('input_select.preset_mode') }}\",\n \"percentage_template\": \"{{ states('input_number.percentage') }}\",\n \"oscillating_template\": \"{{ states('input_select.osc') }}\",\n \"direction_template\": \"{{ states('input_select.direction') }}\",\n \"turn_on\": [\n {\n \"service\": \"input_boolean.turn_on\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_on\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"turn_off\": [\n {\n \"service\": \"input_boolean.turn_off\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_off\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"set_preset_mode\": [\n {\n \"service\": \"input_select.select_option\",\n \"data_template\": {\n \"entity_id\": _PRESET_MODE_INPUT_SELECT,\n \"option\": \"{{ preset_mode }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_preset_mode\",\n 
\"caller\": \"{{ this.entity_id }}\",\n \"option\": \"{{ preset_mode }}\",\n },\n },\n ],\n \"set_percentage\": [\n {\n \"service\": \"input_number.set_value\",\n \"data_template\": {\n \"entity_id\": _PERCENTAGE_INPUT_NUMBER,\n \"value\": \"{{ percentage }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_value\",\n \"caller\": \"{{ this.entity_id }}\",\n \"value\": \"{{ percentage }}\",\n },\n },\n ],\n }\n assert await setup.async_setup_component(\n hass,\n \"fan\",\n {\"fan\": {\"platform\": \"template\", \"fans\": {\"test_fan\": test_fan_config}}},\n )\n\n await hass.async_block_till_done()\n await hass.async_start()\n await hass.async_block_till_done()\n\n await common.async_turn_on(hass, _TEST_FAN)\n _verify(hass, STATE_ON, 0, None, None, None)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, 0, None, None, None)\n\n percent = 100\n await common.async_set_percentage(hass, _TEST_FAN, percent)\n assert int(float(hass.states.get(_PERCENTAGE_INPUT_NUMBER).state)) == percent\n _verify(hass, STATE_ON, percent, None, None, None)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, percent, None, None, None)\n\n preset = \"auto\"\n await common.async_set_preset_mode(hass, _TEST_FAN, preset)\n assert hass.states.get(_PRESET_MODE_INPUT_SELECT).state == preset\n _verify(hass, STATE_ON, percent, None, None, preset)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, percent, None, None, preset)\n\n await common.async_set_direction(hass, _TEST_FAN, True)\n _verify(hass, STATE_OFF, percent, None, None, preset)\n\n await common.async_oscillate(hass, _TEST_FAN, True)\n _verify(hass, STATE_OFF, percent, None, None, preset)", "def test_all_providers(self, mock_provider, monkeypatch):\n\n def mock_providers():\n return ['mock']\n\n monkeypatch.setattr(notifiers, 'all_providers', mock_providers)\n\n assert 'mock' in notifiers.all_providers()", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'random_max_len,value_profile,'\n strategy1.probability = 1\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_register_http_failure(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def test_patch_hyperflex_proxy_setting_policy(self):\n pass", "def setUp(self):\n super().setUp()\n\n # Mock the call to the SAP SuccessFactors assertion endpoint\n SAPSF_ASSERTION_URL = 
'http://successfactors.com/oauth/idp'\n\n def assertion_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')\n\n httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)\n\n SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp'\n\n def bad_callback(_request, _uri, headers):\n \"\"\"\n Return a 404 error when someone tries to call the URL.\n \"\"\"\n return (404, headers, 'NOT AN ASSERTION')\n\n httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback)\n\n # Mock the call to the SAP SuccessFactors token endpoint\n SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'\n\n def token_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'assertion=fake_saml_assertion' in _request.body\n assert b'company_id=NCC1701D' in _request.body\n assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, '{\"access_token\": \"faketoken\"}')\n\n httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)\n\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'email': '[email protected]',\n 'country': 'Australia',\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)", "def testServiceMapping_ByFactory(self):\n self.DoMappingTest({'/my-service': MyService.new_factory('new-value')})", "def testTestExpectationMap(self):\n self._StringToMapHelper(data_types.TestExpectationMap,\n data_types.ExpectationBuilderMap)", "def test_entities__FieldCustomization__set_value__1(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n assert u'Default time zone value 123' == fc.get_value(field, 'label')\n assert u'Default time zone value 123' == fc.query_value(field, 'label')", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": \"555\"}', response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/product_buy/?format=json\", data={\"price\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"product was bought\", response.content)\n response = 
c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn({\"account\": \"455\"}, response.content)", "def test_startup_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # make sure some value isnt the default value\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n self.driver._protocol._param_dict.update(\"baz=30\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 30)\n\n # pretend to manually adjust a few things:\n self.driver._protocol._param_dict.update(\"foo=1000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 1000)\n self.driver._protocol._param_dict.update(\"bar=1500\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 1500)\n self.driver._protocol._param_dict.update(\"baz=2000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n\n # pretend to go through the motions of a startup sequence\n self.driver.set_init_params({'foo': 100, \"bar\": 150, \"baz\": 200})\n\n # Now a virtual method in the protocol that asserts when not implemented\n # behavior proven in derived protocol classes\n # self.driver.apply_startup_params()\n\n # check the values on the other end\n # running_config = self.driver._protocol.get_cached_config()\n\n # confirm that the default values were set back appropriately.\n # self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 100)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 150)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bat\"), 40)\n\n ##### Integration tests for startup config in the SBE37 integration suite", "def test_get_setting(monkeypatch):\n random_key = uuid()\n default_value, actual_value = \"foo\", \"bar\"\n assert helpers.get_setting(random_key, default_value) == default_value\n monkeypatch.setenv(random_key, actual_value)\n assert helpers.get_setting(random_key, default_value) == actual_value", "def test_handle_value_error(self, runway_context: MockRunwayContext) -> None:\n runway_context.add_stubber(\"ecr\")\n with pytest.raises(ValueError) as excinfo:\n EcrLookup.handle(\"unsupported\", runway_context)\n assert str(excinfo.value) == \"ecr lookup does not support 'unsupported'\"\n with pytest.raises(ValueError):\n EcrLookup.handle(\"unsupported::default=something\", runway_context)", "def test_setup(self):\n self.assertIsNotNone(getattr(self, 'original_good_practice_attrs', None))\n self.assertIsNotNone(getattr(self, 'original_get_overrides', None))\n self.assertIsNotNone(getattr(self, 'original_get_alt_field_info', None))\n self.assertIsNone(getattr(self.form, 'is_prepared', None))\n self.assertNotIn('good_practice_attrs', self.form.has_call)\n self.assertNotIn('get_overrides', self.form.has_call)\n self.assertNotIn('get_alt_field_info', self.form.has_call)\n good_practice = self.form.good_practice_attrs()\n if self.good_practice == 'empty':\n self.assertEqual({}, good_practice)\n overrides = self.form.get_overrides()\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual({}, overrides)\n elif self.overrides_empty_or_skip == 'skip':\n self.assertEqual(self.no_resize_override(), overrides)\n if self.alt_info == 'empty':\n self.assertEqual({}, self.form.get_alt_field_info())\n self.assertIn('get_alt_field_info', self.form.has_call)\n 
self.assertEqual(self.form.get_alt_field_info.__name__, 'empty_get_alt_field_info')\n self.assertIn('good_practice_attrs', self.form.has_call)\n self.assertIn('get_overrides', self.form.has_call)\n self.form.has_call = []\n self.assertEqual(self.form.good_practice_attrs.__name__, 'empty_good_practice_attrs')\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual(self.form.get_overrides.__name__, 'empty_get_overrides')\n self.assertEqual(self.form.get_overrides.__name__, 'skip_get_overrides')\n request_type = 'POST' if self.get_initial_data() else 'GET'\n self.assertEqual(request_type, self.request.method)", "def test_call_alt_params(self):\r\n otu_picker = BlastOtuPicker({'max_e_value': 1e-30})\r\n expected = {}\r\n actual = otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected)\r\n\r\n self.otu_picker = BlastOtuPicker(\r\n {'max_e_value': 1e-3, 'Similarity': 0.90})\r\n expected_90 = {'ref1': ['s3', 's2', 's1'],\r\n 'ref2': ['s4'],\r\n 'ref3': ['s5'],\r\n 'ref4': ['s6']}\r\n actual = self.otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected_90)", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def test_function_values(self):\n\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_foo\",\n self.pick_byte2,\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=1,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_bar\",\n lambda x : bool(x&2), # bit map example\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=False,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n \n # check defaults just to be safe\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 1)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n result = self.param_dict.update(1005) # just change first in list\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 3)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # fn_bar does not get updated here\n result = self.param_dict.update_many(1205)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(len(result), 1)\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 4)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # both are updated now\n result = self.param_dict.update_many(6)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(result['fn_bar'], True)\n self.assertEqual(len(result), 2)\n \n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 0)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, True)", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def mocked_config_file_custom_provider():\n data = \"\"\"[YESSSSMS]\nLOGIN = 06501234567\nPASSWD = MySecre3tPassw0rd\nDEFAULT_TO = 
+43664123123123\n# MVNO = FANTASYMOBILE\n[YESSSSMS_PROVIDER_URLS]\nLOGIN_URL = mock://kontomanager.at/index.php\nLOGOUT_URL = mock://kontomanager.at/index.php?dologout=2\nKONTOMANAGER_URL = mock://kontomanager.at/kundendaten.php\nWEBSMS_FORM_URL = mock://kontomanager.at/websms.php\nSEND_SMS_URL = mock://kontomanager.at/websms_send.php\n\"\"\"\n with mock.patch(\n \"configparser.open\",\n # \"builtins.open\",\n mock.mock_open(read_data=data),\n ):\n yield", "def test_apply_startup_params(self):\n self.assertRaises(NotImplementedException,\n self.driver.apply_startup_params)", "def test_map_view_context_fields_values(\n self,\n mock_installation_statistics_model_overall_counts,\n mock_installation_statistics_model_data_per_period,\n mock_installation_statistics_model_timeline,\n mock_get_data_created_datetime_scope\n ): # pylint: disable=too-many-locals\n mock_timeline = ['2017-05-14', '2017-05-15', '2017-05-16']\n mock_students, mock_courses, mock_instances = [4124, 5122, 6412], [110, 211, 167], [30, 20, 25]\n mock_instances_count, mock_courses_count, mock_students_count, mock_certificates_count = 6412, 167, 25, 0\n mock_first_datetime_of_update_data = datetime(2017, 6, 1, 14, 56, 18)\n mock_last_datetime_of_update_data = datetime(2017, 7, 2, 23, 12, 8)\n\n mock_installation_statistics_model_timeline.return_value = mock_timeline\n\n mock_installation_statistics_model_data_per_period.return_value = mock_students, mock_courses, mock_instances\n\n mock_installation_statistics_model_overall_counts.return_value = {\n \"instances_count\": 6412,\n \"courses_count\": 167,\n \"students_count\": 25,\n \"generated_certificates_count\": 0,\n }\n\n mock_get_data_created_datetime_scope.return_value = \\\n mock_first_datetime_of_update_data, mock_last_datetime_of_update_data\n\n response = self.client.get('/')\n\n self.assertEqual(json.loads(response.context['timeline']), mock_timeline)\n self.assertEqual(json.loads(response.context['students']), mock_students)\n self.assertEqual(json.loads(response.context['courses']), mock_courses)\n self.assertEqual(json.loads(response.context['instances']), mock_instances)\n self.assertEqual(response.context['instances_count'], mock_instances_count)\n self.assertEqual(response.context['students_count'], mock_students_count)\n self.assertEqual(response.context['courses_count'], mock_courses_count)\n self.assertEqual(response.context['generated_certificates_count'], mock_certificates_count)\n self.assertEqual(response.context['first_datetime_of_update_data'], mock_first_datetime_of_update_data)\n self.assertEqual(response.context['last_datetime_of_update_data'], mock_last_datetime_of_update_data)", "def test_settings_proxy_properties_setting(parameters: Dict[str, Any]) -> None:\n settings = Settings()\n settings_proxy = settings.create_proxy()\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))\n\n for key, value in parameters.items():\n settings.__setattr__(key, value)\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))", "def test_config_device_init_with_defaults(get_config, monkeypatch):\n notbase_config = {'not_presented': 1}\n monkeypatch.setattr(DeviceConfig, 
'minimal_essential_conf', notbase_config)\n cfg = get_config(DeviceConfig, base_config)\n\n assert isinstance(cfg, DeviceConfig), 'wrong class'\n assert cfg.data == notbase_config, 'bad config loaded'", "def setUp(self):\n super().setUp()\n\n self.provider = Provider.objects.filter(type=Provider.PROVIDER_OCP).first()\n\n ocp_metric = metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR\n ocp_source_type = Provider.PROVIDER_OCP\n tiered_rates = [{\"unit\": \"USD\", \"value\": 0.22}]\n self.ocp_data = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": ocp_source_type,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}, \"tiered_rates\": tiered_rates}],\n \"currency\": \"USD\",\n }\n self.basic_model = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}}],\n \"currency\": \"USD\",\n }", "async def test_basic_setup(\n hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: ProtectCamera\n):\n\n camera_high_only = mock_camera.copy(deep=True)\n camera_high_only._api = mock_entry.api\n camera_high_only.channels[0]._api = mock_entry.api\n camera_high_only.channels[1]._api = mock_entry.api\n camera_high_only.channels[2]._api = mock_entry.api\n camera_high_only.name = \"Test Camera 1\"\n camera_high_only.id = \"test_high\"\n camera_high_only.channels[0].is_rtsp_enabled = True\n camera_high_only.channels[0].name = \"High\"\n camera_high_only.channels[0].rtsp_alias = \"test_high_alias\"\n camera_high_only.channels[1].is_rtsp_enabled = False\n camera_high_only.channels[2].is_rtsp_enabled = False\n\n camera_medium_only = mock_camera.copy(deep=True)\n camera_medium_only._api = mock_entry.api\n camera_medium_only.channels[0]._api = mock_entry.api\n camera_medium_only.channels[1]._api = mock_entry.api\n camera_medium_only.channels[2]._api = mock_entry.api\n camera_medium_only.name = \"Test Camera 2\"\n camera_medium_only.id = \"test_medium\"\n camera_medium_only.channels[0].is_rtsp_enabled = False\n camera_medium_only.channels[1].is_rtsp_enabled = True\n camera_medium_only.channels[1].name = \"Medium\"\n camera_medium_only.channels[1].rtsp_alias = \"test_medium_alias\"\n camera_medium_only.channels[2].is_rtsp_enabled = False\n\n camera_all_channels = mock_camera.copy(deep=True)\n camera_all_channels._api = mock_entry.api\n camera_all_channels.channels[0]._api = mock_entry.api\n camera_all_channels.channels[1]._api = mock_entry.api\n camera_all_channels.channels[2]._api = mock_entry.api\n camera_all_channels.name = \"Test Camera 3\"\n camera_all_channels.id = \"test_all\"\n camera_all_channels.channels[0].is_rtsp_enabled = True\n camera_all_channels.channels[0].name = \"High\"\n camera_all_channels.channels[0].rtsp_alias = \"test_high_alias\"\n camera_all_channels.channels[1].is_rtsp_enabled = True\n camera_all_channels.channels[1].name = \"Medium\"\n camera_all_channels.channels[1].rtsp_alias = \"test_medium_alias\"\n camera_all_channels.channels[2].is_rtsp_enabled = True\n camera_all_channels.channels[2].name = \"Low\"\n camera_all_channels.channels[2].rtsp_alias = \"test_low_alias\"\n\n camera_no_channels = mock_camera.copy(deep=True)\n camera_no_channels._api = mock_entry.api\n 
camera_no_channels.channels[0]._api = mock_entry.api\n camera_no_channels.channels[1]._api = mock_entry.api\n camera_no_channels.channels[2]._api = mock_entry.api\n camera_no_channels.name = \"Test Camera 4\"\n camera_no_channels.id = \"test_none\"\n camera_no_channels.channels[0].is_rtsp_enabled = False\n camera_no_channels.channels[0].name = \"High\"\n camera_no_channels.channels[1].is_rtsp_enabled = False\n camera_no_channels.channels[2].is_rtsp_enabled = False\n\n camera_package = mock_camera.copy(deep=True)\n camera_package._api = mock_entry.api\n camera_package.channels[0]._api = mock_entry.api\n camera_package.channels[1]._api = mock_entry.api\n camera_package.channels[2]._api = mock_entry.api\n camera_package.name = \"Test Camera 5\"\n camera_package.id = \"test_package\"\n camera_package.channels[0].is_rtsp_enabled = True\n camera_package.channels[0].name = \"High\"\n camera_package.channels[0].rtsp_alias = \"test_high_alias\"\n camera_package.channels[1].is_rtsp_enabled = False\n camera_package.channels[2].is_rtsp_enabled = False\n package_channel = camera_package.channels[0].copy(deep=True)\n package_channel.is_rtsp_enabled = False\n package_channel.name = \"Package Camera\"\n package_channel.id = 3\n package_channel.fps = 2\n package_channel.rtsp_alias = \"test_package_alias\"\n camera_package.channels.append(package_channel)\n\n mock_entry.api.bootstrap.cameras = {\n camera_high_only.id: camera_high_only,\n camera_medium_only.id: camera_medium_only,\n camera_all_channels.id: camera_all_channels,\n camera_no_channels.id: camera_no_channels,\n camera_package.id: camera_package,\n }\n await hass.config_entries.async_setup(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.CAMERA, 14, 6)\n\n # test camera 1\n entity_id = validate_default_camera_entity(hass, camera_high_only, 0)\n await validate_rtsps_camera_state(hass, camera_high_only, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_high_only, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_high_only, 0, entity_id)\n\n # test camera 2\n entity_id = validate_default_camera_entity(hass, camera_medium_only, 1)\n await validate_rtsps_camera_state(hass, camera_medium_only, 1, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_medium_only, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_medium_only, 1, entity_id)\n\n # test camera 3\n entity_id = validate_default_camera_entity(hass, camera_all_channels, 0)\n await validate_rtsps_camera_state(hass, camera_all_channels, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 0, entity_id)\n\n entity_id = validate_rtsps_camera_entity(hass, camera_all_channels, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsps_camera_state(hass, camera_all_channels, 1, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 1, entity_id)\n\n entity_id = validate_rtsps_camera_entity(hass, camera_all_channels, 2)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsps_camera_state(hass, 
camera_all_channels, 2, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 2)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 2, entity_id)\n\n # test camera 4\n entity_id = validate_default_camera_entity(hass, camera_no_channels, 0)\n await validate_no_stream_camera_state(\n hass, camera_no_channels, 0, entity_id, features=0\n )\n\n # test camera 5\n entity_id = validate_default_camera_entity(hass, camera_package, 0)\n await validate_rtsps_camera_state(hass, camera_package, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_package, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_package, 0, entity_id)\n\n entity_id = validate_default_camera_entity(hass, camera_package, 3)\n await validate_no_stream_camera_state(\n hass, camera_package, 3, entity_id, features=0\n )", "async def test_form_multiple_services(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"errors\"] is None\n\n with patch(\"aussiebb.asyncio.AussieBB.__init__\", return_value=None), patch(\n \"aussiebb.asyncio.AussieBB.login\", return_value=True\n ), patch(\"aussiebb.asyncio.AussieBB.get_services\", return_value=FAKE_SERVICES):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n FAKE_DATA,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == RESULT_TYPE_FORM\n assert result2[\"step_id\"] == \"service\"\n assert result2[\"errors\"] is None\n\n with patch(\n \"homeassistant.components.aussie_broadband.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]]},\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result3[\"title\"] == TEST_USERNAME\n assert result3[\"data\"] == FAKE_DATA\n assert result3[\"options\"] == {\n CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]],\n }\n assert len(mock_setup_entry.mock_calls) == 1", "def default_load_all(cls, values): # noqa: N805\n if not any(values.values()):\n values[\"eia860\"] = Eia860Settings()\n values[\"eia861\"] = Eia861Settings()\n values[\"eia923\"] = Eia923Settings()\n\n return values", "def requires_mapping(self):", "def test_set_dict_value_3(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"C\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"C\")", "def used_implementation(self, impl: str, value):", "def test_real_value(self):\n setting_model = Setting(python_type='list', dry_value='')\n self.assertEqual(setting_model.value, [])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Lancelot,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Lancelot', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Robin,Patsy',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Robin', 'Patsy'])", "def test_input_identifier_types(self):\n # It's okay to set INPUT_IDENTIFIER_TYPES to None it 
means you\n # will cover any and all identifier types.\n class Base(IdentifierCoverageProvider):\n SERVICE_NAME = \"Test provider\"\n DATA_SOURCE_NAME = DataSource.GUTENBERG\n\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = None\n provider = MockProvider(self._db)\n assert None == provider.input_identifier_types\n\n # It's okay to set a single value.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = Identifier.ISBN\n provider = MockProvider(self._db)\n assert [Identifier.ISBN] == provider.input_identifier_types\n\n # It's okay to set a list of values.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = [Identifier.ISBN, Identifier.OVERDRIVE_ID]\n provider = MockProvider(self._db)\n assert ([Identifier.ISBN, Identifier.OVERDRIVE_ID] ==\n provider.input_identifier_types)\n\n # It's not okay to do nothing.\n class MockProvider(Base):\n pass\n with pytest.raises(ValueError) as excinfo:\n MockProvider(self._db)\n assert \"MockProvider must define INPUT_IDENTIFIER_TYPES, even if the value is None.\" in str(excinfo.value)", "def _verify_map(self):\n # ---- verify self.info ----\n if self.info['contype'] == NotImplemented:\n # 'contype' must be defined\n errstr = \"self.info['contype'] must be defined as:\\n\" \\\n \" 'motion', 'waveform', or 'power'\"\n raise NotImplementedError(errstr)\n elif self.info['contype'] not in ['motion',\n 'waveform',\n 'power']:\n # 'contype' must be one of specified type\n errstr = \"self.info['contype'] must be defined as:\\n\" \\\n \" 'motion', 'waveform', or 'power'\"\n raise NotImplementedError(errstr)\n\n # ---- verity self.configs ----\n # 'dataset fields' must be defined\n if 'dataset fields' not in self.configs:\n errstr = \"self.configs['dataset fields'] must be defined \" \\\n \"as:\\n [('field name', dtype), ]\"\n raise NotImplementedError(errstr)\n elif self.configs['dataset fields'] == NotImplemented:\n errstr = \"self.configs['dataset fields'] must be defined \" \\\n \"as:\\n [('field name', dtype), ]\"\n raise NotImplementedError(errstr)\n\n # 'dset field to numpy field' must be defined\n # - each 'dataset field' needs a mapping to a structured numpy\n # field for hdfReadControl\n # - 'dset field to numpy field' is a list of 3-element tuples\n # where each entry in the list corresponds to a dataset field\n # name\n # - the 3-element tuple must follow the format:\n #\n # self.configs['dset field to numpy field'][i] = (\n # str, # dataset field name\n # str, # corresponding structured numpy field name\n # int) # index of structured numpy field\n #\n # For example, the '6K Compumotor would look like...\n # self.configs['dset field to numpy'] = [\n # ('x', 'xyz', 0),\n # ('y', 'xyz', 1),\n # ('z', 'xyz', 2)]\n #\n key = 'dset field to numpy field'\n if key not in self.configs:\n raise NotImplementedError\n elif self.configs[key] == NotImplemented:\n raise NotImplementedError\n elif type(self.configs[key]) is not list:\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n elif not all(isinstance(val, tuple)\n for val in self.configs[key]):\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n elif not all(len(val) == 3 for val in self.configs[key]):\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n else:\n err = False\n dset_fields = [name\n for name, dftype\n in self.configs['dataset fields']]\n for dfname, 
npfname, npi in self.configs[key]:\n if dfname not in dset_fields:\n err = True\n break\n elif type(npfname) is not str:\n err = True\n break\n elif type(npi) is not int:\n err = True\n break\n if err:\n errstr = \"self.configs['dset field to numpy field] \" \\\n + \"must be a list of 3-element tuples\"\n raise Exception(errstr)\n\n # contype == 'motion' specific verification\n if self.contype == 'motion':\n # verify 'motion list'\n if 'motion list' not in self.configs:\n # 'motion list' exists\n errstr = \"self.configs['motion list'] must be defined\"\n raise NotImplementedError(errstr)\n elif self.configs['motion list'] == NotImplemented:\n # 'motion list' is defined\n errstr = \"self.configs['motion list'] must be defined\"\n raise NotImplementedError(errstr)\n else:\n # each 'motion list' must have its own config\n for name in self.configs['motion list']:\n if name not in self.configs:\n errstr = \"must defined self.configs['motion \" \\\n \"name'] for each motion list in \" \\\n \"self.configs['motion list'] = \" \\\n \"[motion name, ]\"\n raise NotImplementedError(errstr)\n\n # verify 'probe list'\n if 'probe list' not in self.configs:\n # 'probe list' exists\n errstr = \"self.configs['probe list'] must be defined\"\n raise NotImplementedError(errstr)\n elif self.configs['probe list'] == NotImplemented:\n # 'probe list' is defined\n errstr = \"self.configs['probe list'] must be defined\"\n raise NotImplementedError(errstr)\n else:\n # each 'probe list' must have its own config\n for name in self.configs['probe list']:\n if name not in self.configs:\n errstr = \"must defined self.configs['probe \" \\\n \"name'] for each probe in \" \\\n \"self.configs['probe list'] = \" \\\n \"[probe name, ]\"\n raise NotImplementedError(errstr)\n\n # delete 'config names' if present\n if 'config names' in self.configs:\n del self.configs['config names']\n\n # verify all other contypes\n if self.contype != 'motion':\n # remove 'motion list'\n if 'motion list' in self.configs:\n # remove 'motion list' children\n for name in self.configs['motion list']:\n if name in self.configs:\n del(self.configs[name])\n\n # remove 'motion list'\n del(self.configs['motion list'])\n\n # remove 'probe list'\n if 'probe list' in self.configs:\n # remove 'probe list' children\n for name in self.configs['probe list']:\n if name in self.configs:\n del (self.configs[name])\n\n # remove 'motion list'\n del (self.configs['probe list'])\n\n # verify 'command list'\n # if 'command list' not in self.configs:\n # # 'command list' exists\n # errstr = \"self.configs['command list'] must be \" \\\n # \"defined\"\n # raise NotImplementedError(errstr)\n # elif self.configs['command list'] == NotImplemented:\n # # 'motion list' is defined\n # errstr = \"self.configs['command list'] must be \" \\\n # \"defined\"\n # raise NotImplementedError(errstr)", "def test_prep_overrides(self):\n original_data = self.form.data\n test_data = original_data.copy()\n test_data._mutable = False\n self.form.data = test_data # copied only to allow tear-down reverting to original.\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n self.form.fields = test_fields # copied to allow tear-down reverting to original.\n original_get_overrides = self.form.get_overrides\n def replace_overrides(): return self.formfield_attrs_overrides\n self.form.get_overrides = replace_overrides\n original_alt_field_info = getattr(self.form, 'alt_field_info', None)\n self.form.alt_field_info = {}\n overrides = self.formfield_attrs_overrides.copy()\n 
DEFAULT = overrides.pop('_default_')\n expected_attrs = {}\n for name, field in test_fields.items():\n attrs = field.widget.attrs.copy()\n if isinstance(field.widget, (RadioSelect, CheckboxSelectMultiple, CheckboxInput, )):\n pass # update if similar section in prep_fields is updated.\n attrs.update(overrides.get(name, {}))\n # TODO: setup structure for using default or defined version for all CharFields.\n no_resize = overrides.get(name, {}).pop('no_size_override', False)\n no_resize = True if isinstance(field.widget, (HiddenInput, MultipleHiddenInput)) else no_resize\n if no_resize:\n expected_attrs[name] = attrs\n continue # None of the following size overrides are applied for this field.\n if isinstance(field.widget, Textarea):\n width_attr_name = 'cols'\n default = DEFAULT.get('cols', None)\n display_size = attrs.get('cols', None)\n if 'rows' in DEFAULT:\n height = attrs.get('rows', None)\n height = min((DEFAULT['rows'], int(height))) if height else DEFAULT['rows']\n attrs['rows'] = str(height)\n if default: # For textarea, we always override. The others depend on different conditions.\n display_size = display_size or default\n display_size = min((int(display_size), int(default)))\n elif issubclass(field.__class__, CharField):\n width_attr_name = 'size' # 'size' is only valid for input types: email, password, tel, text\n default = DEFAULT.get('size', None) # Cannot use float(\"inf\") as an int.\n display_size = attrs.get('size', None)\n else: # This field does not have a size setting.\n width_attr_name, default, display_size = None, None, None\n input_size = attrs.get('maxlength', None)\n possible_size = [int(ea) for ea in (display_size or default, input_size) if ea]\n # attrs['size'] = str(int(min(float(display_size), float(input_size)))) # Can't use float(\"inf\") as an int.\n if possible_size and width_attr_name:\n attrs[width_attr_name] = str(min(possible_size))\n expected_attrs[name] = attrs\n # Expected:\n # formfield_attrs_overrides = {\n # '_default_': {'size': 15, 'cols': 20, 'rows': 4, },\n # 'first': {'maxlength': 191, 'size': 20, },\n # 'second': {'maxlength': 2, }, # 'size': 2,\n # 'last': {'maxlength': 2, 'size': 5, },\n # }\n result_fields = self.form.prep_fields()\n result_attrs = {name: field.widget.attrs.copy() for name, field in result_fields.items()}\n first_maxlength = expected_attrs['first']['maxlength'] # overrides['first']['maxlength']\n first_size = expected_attrs['first']['size'] # overrides['first']['size']\n second_maxlength = expected_attrs['second']['maxlength'] # overrides['second']['maxlength']\n last_maxlength = expected_attrs['last']['maxlength'] # overrides['last']['maxlength']\n last_size = expected_attrs['last']['size'] # overrides['last']['size']\n\n self.assertEqual(first_maxlength, result_fields['first'].widget.attrs.get('maxlength', None))\n self.assertEqual(first_size, result_fields['first'].widget.attrs.get('size', None))\n self.assertEqual(second_maxlength, result_fields['second'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_maxlength, result_fields['last'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_size, result_fields['last'].widget.attrs.get('size', None))\n for key, val in expected_attrs.items():\n self.assertEqual(val, result_attrs[key])\n self.assertDictEqual(expected_attrs, result_attrs)\n\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n 
self.form.get_overrides = original_get_overrides", "def test__authenticate_with_custom_port(self):\n get_cloud_config_mock = [\n \"proxmox.connection.url\",\n \"9999\",\n \"fakeuser\",\n \"secretpassword\",\n True,\n ]\n requests_post_mock = MagicMock()\n with patch(\n \"salt.config.get_cloud_config_value\",\n autospec=True,\n side_effect=get_cloud_config_mock,\n ), patch(\"requests.post\", requests_post_mock):\n proxmox._authenticate()\n requests_post_mock.assert_called_with(\n \"https://proxmox.connection.url:9999/api2/json/access/ticket\",\n verify=True,\n data={\"username\": (\"fakeuser\",), \"password\": \"secretpassword\"},\n )", "def test_function_callparams(self):\n\n @Configurable(conf=category('', Parameter('test', value=True)))\n def twist(test=None):\n return test\n\n value = twist()\n\n self.assertTrue(value)\n\n value = twist(False)\n\n self.assertFalse(value)\n\n Configurable.get_annotations(twist)[0].conf['']['test'].value = 1\n\n value = twist()\n\n self.assertEqual(value, 1)", "def test_bad_ext_app_setting(self, mock_settings):\n lang, _ = Language.objects.get_or_create(name=\"Test Language\", code=\"text-x-lang\")\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": None})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": {}})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": ()})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": \" \"})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1.0})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)", "def default_load_all(cls, values): # noqa: N805\n if not any(values.values()):\n values[\"eia\"] = EiaSettings()\n values[\"epacems\"] = EpaCemsSettings()\n values[\"ferc1\"] = Ferc1Settings()\n values[\"ferc714\"] = Ferc714Settings()\n values[\"glue\"] = GlueSettings()\n\n return values", "async def test_plenticore_async_setup_g1(\n hass: HomeAssistant,\n mock_config_entry: MockConfigEntry,\n mock_apiclient: ApiClient,\n) -> None:\n mock_apiclient.get_settings = AsyncMock(\n return_value={\"scb:network\": [SettingsData({\"id\": \"Hostname\"})]}\n )\n mock_apiclient.get_setting_values = AsyncMock(\n # G1 model has the entry id \"Hostname\"\n return_value={\n \"devices:local\": {\n \"Properties:SerialNo\": \"12345\",\n \"Branding:ProductName1\": \"PLENTICORE\",\n \"Branding:ProductName2\": \"plus 10\",\n \"Properties:VersionIOC\": \"01.45\",\n \"Properties:VersionMC\": \"01.46\",\n },\n \"scb:network\": {\"Hostname\": \"scb\"},\n }\n )\n\n mock_config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n plenticore = 
hass.data[DOMAIN][mock_config_entry.entry_id]\n\n assert plenticore.device_info == DeviceInfo(\n configuration_url=\"http://192.168.1.2\",\n identifiers={(DOMAIN, \"12345\")},\n manufacturer=\"Kostal\",\n model=\"PLENTICORE plus 10\",\n name=\"scb\",\n sw_version=\"IOC: 01.45 MC: 01.46\",\n )", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {0: ['cdhit_test_seqs_0'],\r\n 1: ['cdhit_test_seqs_1'],\r\n 2: ['cdhit_test_seqs_2'],\r\n 3: ['cdhit_test_seqs_3'],\r\n 4: ['cdhit_test_seqs_4'],\r\n 5: ['cdhit_test_seqs_5'],\r\n 6: ['cdhit_test_seqs_6'],\r\n 7: ['cdhit_test_seqs_7'],\r\n 8: ['cdhit_test_seqs_8'],\r\n 9: ['cdhit_test_seqs_9']}\r\n\r\n app = CdHitOtuPicker(params={})\r\n obs = app(self.tmp_seq_filepath1)\r\n self.assertEqual(obs, exp)", "def test_call_default_params(self):\r\n\r\n exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),\r\n 'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),\r\n '2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),\r\n 'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),\r\n }\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath,\r\n self.tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)", "async def test_setup_multiple(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n bootstrap: Bootstrap,\n) -> None:\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n\n assert ufp.entry.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert ufp.entry.unique_id == ufp.api.bootstrap.nvr.mac\n\n nvr = bootstrap.nvr\n nvr._api = ufp.api\n nvr.mac = \"A1E00C826983\"\n nvr.id\n ufp.api.get_nvr = AsyncMock(return_value=nvr)\n\n with patch(\n \"homeassistant.components.unifiprotect.utils.ProtectApiClient\"\n ) as mock_api:\n mock_config = MockConfigEntry(\n domain=DOMAIN,\n data={\n \"host\": \"1.1.1.1\",\n \"username\": \"test-username\",\n \"password\": \"test-password\",\n \"id\": \"UnifiProtect\",\n \"port\": 443,\n \"verify_ssl\": False,\n },\n version=2,\n )\n mock_config.add_to_hass(hass)\n\n mock_api.return_value = ufp.api\n\n await hass.config_entries.async_setup(mock_config.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert mock_config.unique_id == ufp.api.bootstrap.nvr.mac", "def test_get_user_settings(client, jwt, session, keycloak_mock, monkeypatch): # pylint:disable=unused-argument\n user_model = factory_user_model(user_info=TestUserInfo.user_test)\n contact = factory_contact_model()\n contact_link = ContactLinkModel()\n contact_link.contact = contact\n contact_link.user = user_model\n contact_link.commit()\n kc_id = user_model.keycloak_guid\n\n claims = copy.deepcopy(TestJwtClaims.updated_test.value)\n claims['sub'] = str(kc_id)\n claims['idp_userid'] = str(user_model.idp_userid)\n patch_token_info(claims, monkeypatch)\n\n OrgService.create_org(TestOrgInfo.org_branch_name, user_id=user_model.id)\n\n # post token with updated claims\n headers = factory_auth_header(jwt=jwt, claims=claims)\n rv = client.get(f'/api/v1/users/{kc_id}/settings', headers=headers, content_type='application/json')\n item_list = rv.json\n account = next(obj for obj in item_list if obj['type'] == 'ACCOUNT')\n assert account['accountType'] == 'BASIC'\n assert account['additionalLabel'] == TestOrgInfo.org_branch_name.get('branchName')\n assert rv.status_code == http_status.HTTP_200_OK\n assert 
schema_utils.validate(item_list, 'user_settings_response')[0]\n assert account['productSettings'] == f'/account/{account[\"id\"]}/restricted-product'\n\n kc_id_no_user = TestUserInfo.user1.get('keycloak_guid')\n claims = copy.deepcopy(TestJwtClaims.updated_test.value)\n claims['sub'] = str(kc_id_no_user)\n patch_token_info(claims, monkeypatch)\n # post token with updated claims\n headers = factory_auth_header(jwt=jwt, claims=claims)\n rv = client.get(f'/api/v1/users/{kc_id_no_user}/settings', headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_200_OK\n assert schema_utils.validate(item_list, 'user_settings_response')[0]\n item_list = rv.json\n account = next((obj for obj in item_list if obj['type'] == 'ACCOUNT'), None)\n assert account is None\n user_profile = next(obj for obj in item_list if obj['type'] == 'USER_PROFILE')\n assert '/userprofile' in user_profile.get('urlpath')", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'corpus_subset,'\n strategy1.probability = 1\n strategy1.engine = 'afl'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_entities__FieldCustomization__default_value__1(address_book):\n fc = IFieldCustomization(address_book)\n assert u'Time zone' == fc.default_value(IAddressBook['time_zone'], 'label')", "def test_sanity(self, mock_provider):\n p = mock_provider()\n assert p.metadata == {'base_url': 'https://api.mock.com',\n 'provider_name': 'mock_provider',\n 'site_url': 'https://www.mock.com'}\n assert p.arguments == {\n 'not_required': {\n 'oneOf': [\n {'items': {'type': 'string'}, 'minItems': 1, 'type': 'array', 'uniqueItems': True},\n {'type': 'string'}\n ]\n },\n 'required': {'type': 'string'},\n 'message': {'type': 'string'},\n 'option_with_default': {'type': 'string'}\n }\n\n assert p.required == ['required']\n rsp = p.notify(**self.valid_data)\n assert isinstance(rsp, Response)\n assert not rsp.errors\n assert rsp.raise_on_errors() is None\n assert repr(rsp) == '<Response,provider=Mock_provider,status=success>'\n assert repr(p) == '<Provider:[Mock_provider]>'", "def test_get_setting(monkeypatch):\n resp = str(uuid.uuid4())\n arg = str(uuid.uuid4())\n kwarg = str(uuid.uuid4())\n get_secret = Mock(return_value=resp)\n monkeypatch.setattr(\"lambdautils.state.get_secret\", get_secret)\n resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)\n assert resp2 == resp\n get_secret.assert_called_with(arg, kwarg=kwarg)", "def _validate_provider(self, name_or_uuid, **kwargs):\n found = self.client._provider_tree.data(name_or_uuid)\n # If kwargs provided, their names indicate ProviderData attributes\n for attr, expected in kwargs.items():\n try:\n self.assertEqual(getattr(found, attr), expected)\n except AttributeError:\n self.fail(\"Provider with name or UUID %s doesn't have \"\n \"attribute %s (expected value: %s)\" %\n (name_or_uuid, attr, expected))", "def test_set(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n self.assertIsNone(\n ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )\n\n setting_name = 
'user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 1)\n obj = AppSetting.objects.get(name=setting_name, user=self.user)\n self.assertEqual(obj.get_value(), 'value')\n self.assertIsNone(\n ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )", "def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def valid_config(hass: HomeAssistant, requests_mock):\n requests_mock.get(\n \"https://api.vultr.com/v1/account/info?api_key=ABCDEFG1234567\",\n text=load_fixture(\"account_info.json\", \"vultr\"),\n )\n\n with patch(\n \"vultr.Vultr.server_list\",\n return_value=json.loads(load_fixture(\"server_list.json\", \"vultr\")),\n ):\n # Setup hub\n vultr.setup(hass, VALID_CONFIG)", "def test_patch_hyperflex_ucsm_config_policy(self):\n pass", "def test_claims_supported_not_set(self):\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], [])", "def test_bad_custom_params(self):\r\n bad_custom_params = ['test_custom_params: test_custom_param_value']\r\n self.xmodule.custom_parameters = bad_custom_params\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n with self.assertRaises(LTIError):\r\n self.xmodule.get_input_fields()", "def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }", "def test_default_customer_app_map_search(self):\n pass", "def test_inputs_suitor_prefs(resident_names, hospital_names, capacities, seed):\n\n _, _, match = _make_match(resident_names, hospital_names, capacities, seed)\n\n assert match._check_suitor_prefs()\n\n match.suitors[0].pref_names = [1, 2, 3]\n\n with pytest.raises(Exception):\n match._check_suitor_prefs()", "def _fill_user_entries(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n # For every enabled verification parameter, set its value in its corresponding entry.\n for param in self.verify_params.enabled:\n self._fill_user_entry(self.computer, param)" ]
[ "0.70553404", "0.66161525", "0.63435036", "0.57704574", "0.56966025", "0.5679671", "0.55836266", "0.53828114", "0.53427476", "0.5324517", "0.53106654", "0.52406067", "0.52070177", "0.5202646", "0.5187437", "0.51687354", "0.51323", "0.5131696", "0.5125414", "0.51162493", "0.5099038", "0.50937515", "0.50830257", "0.5074649", "0.5060541", "0.5043621", "0.50380784", "0.5033535", "0.50331086", "0.5019184", "0.5015952", "0.50138354", "0.5010966", "0.5001841", "0.5001106", "0.49965456", "0.49955165", "0.4990686", "0.49857783", "0.4980015", "0.49642426", "0.49610353", "0.49533918", "0.49497026", "0.49431792", "0.49179587", "0.49072146", "0.48993424", "0.48988518", "0.48935488", "0.4889646", "0.48893923", "0.48878428", "0.48860267", "0.4884779", "0.4883129", "0.48821214", "0.48809478", "0.48780823", "0.48780823", "0.48643428", "0.48643366", "0.4858507", "0.48570344", "0.4854182", "0.48500556", "0.4849991", "0.4839541", "0.48388574", "0.4838778", "0.48371297", "0.48260197", "0.48259574", "0.4823445", "0.481888", "0.48185813", "0.48157647", "0.48143628", "0.48136953", "0.4811497", "0.481026", "0.4806178", "0.48027375", "0.48021978", "0.4801643", "0.47981536", "0.47973943", "0.47958946", "0.47932047", "0.47829154", "0.47816893", "0.4781291", "0.4780467", "0.47718394", "0.47702962", "0.47680944", "0.47609353", "0.47575104", "0.47506663", "0.4749671" ]
0.69396245
1
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone).
def test_register_sapsf_metadata_present_override_other_value(self): value_map = {'country': {'United States': 'blahfake'}} expected_country = 'AU' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', } if value_map: provider_settings['sapsf_value_mappings'] = value_map self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps(provider_settings) ) self._test_register(country=expected_country)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_metadata_present_empty_value_override(self):\n\n value_map = {'country': {}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_with_value_default(self):\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,country,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'country': 'Australia'\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)\n\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings),\n default_email='[email protected]'\n )\n self.USER_EMAIL = '[email protected]'\n self._test_register()", "def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', 
True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"value\": \"There is a product bought\"}',\n response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/?format=json\", data={\"PLN\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": \"100\"}', response.content)\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn('{\"PLN\": \"100\"}', response.content)", "def test_mocked_api_set_new_value(self):\n c = Client()\n response = c.get(self.patch_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 157}',\n response.content)\n response = c.patch(\n self.patch_url, data={\"PLN\": 20, \"EURO\": 20})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)\n response = c.get(self.patch_url)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)", "def test_entities__FieldCustomization__set_value__2(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')", "def set_provider_defaults(provider: str, config: Dict[str, Any]) -> None:\n\n class SetProviderDefaults:\n @st.hookimpl\n def get_provider_config(self, name, params, registry):\n if name != provider:\n return None\n conf = config.copy()\n conf.update(params)\n return conf\n\n st.registry.register(SetProviderDefaults())", "def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"", "def test_entities__NoFieldCustomization__set_value__1(root_folder):\n nfc = IFieldCustomization(root_folder)\n with pytest.raises(NotImplementedError):\n nfc.set_value(IAddressBook['time_zone'], u'label', u'foo')", "def setup_provider(self):\n pass", "def test_raises_set_alt_data(self):\n name, value = 'generic_field', 'alt_data_value'\n field = 
self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)", "def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)", "def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def test_configure():\n\n configs = DTO()\n configs.update(fake_name='fake_name', fake_id='fake_id',\n fake_number=33, fake_value='fake_value')\n application_services.configure(configs)\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in\n ['FAKE_NAME', 'FAKE_ID', 'FAKE_NUMBER', 'FAKE_VALUE'])\n\n assert not any(name in app_configs for name in\n ['fake_name', 'fake_id', 'fake_number', 'fake_value'])", "def setUp(self):\n super().setUp()\n self.mock_requests(get_geocode_data=copy.deepcopy(test_constants.GET_ADDRESS_CANDIDATES_API_MOCK), \n get_data=copy.deepcopy(test_constants.GET_LIBRARY_API_MOCK))", "def mock_config(monkeypatch: Any) -> None:\n monkeypatch.setattr(\"catapi.config.ENABLE_FOO\", \"true\")\n monkeypatch.setattr(\"catapi.config.ENABLE_BAR\", \"false\")", "def test_force_override(self):\n DummyLoader.register()\n try:\n DummyLoader.register(override=True)\n except ValueError:\n self.fail('Can not register if passing `override` set to `True`.')", "def test_provided_data_takes_precedence_over_environ(self, mock_provider, monkeypatch):\n p = mock_provider()\n prefix = f'mock_'\n monkeypatch.setenv(f'{prefix}{p.provider_name}_required'.upper(), 'foo')\n rsp = p.notify(required='bar', env_prefix=prefix)\n assert rsp.status == 'success'\n assert rsp.data['required'] == 'bar'", "def test_resolution_set_successful(self):\n # We create an instance of the panel so we can check existing values\n 
panel = ResolutionAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['resolution'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'resolution',\n ','.join(self.new['resolution']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['resolution'])", "def test_update_reg_ex_config(self):\n pass", "def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)", "def test_value_base_metadata_param(self):\n value = { 'color': 'blue' }\n base_meta = BaseMetadata(api_client=self.IDS_SYS_CLIENT, value=value)\n self.assertEqual(base_meta.value, value)", "def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_set_dict_value_2(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"B\")", "def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"", "def test_country_overrides(self):\n # Retrieve the registration form description\n with override_settings(REGISTRATION_EXTRA_FIELDS={\"country\": \"required\"}):\n response = self.client.get(self.url)\n self.assertHttpOK(response)\n\n self.assertContains(response, 'Kosovo')", "def testMapSetdefault(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap()\n with self.assertRaises(AssertionError):\n m.setdefault(1, data_types.BuildStats())\n with self.assertRaises(AssertionError):\n m.setdefault('1', 2)\n m.setdefault('1', data_types.BuildStats())\n self.assertEqual(m, {'1': data_types.BuildStats()})", "def __init__(self, allow_replace=False):\n self.providers = {}\n self.allow_replace = allow_replace", "def test_update_when_value_is_none(self, mock_req):\n self.setup_api(None, mock_req)\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n self.fake_delay(2)\n sensor.update()\n assert sensor.state is None", "def 
defaults_provider():\n return getattr(defaults_provider, 'overrides', {})", "def test_patch_user_identity_mapping(self):\n pass", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def test_good_custom_params(self):\r\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n self.xmodule.get_input_fields()\r\n self.xmodule.oauth_params.assert_called_with(\r\n {u'custom_test_custom_params': u'test_custom_param_value'},\r\n 'test_client_key', 'test_client_secret'\r\n )", "def test_good_custom_params(self):\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\n self.xmodule.oauth_params = Mock()\n self.xmodule.get_input_fields()\n self.xmodule.oauth_params.assert_called_with(\n {'custom_test_custom_params': 'test_custom_param_value'},\n 'test_client_key', 'test_client_secret'\n )", "def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def test_entities__FieldCustomization__set_value__3(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {'0': 'R27DLI_4812',\r\n '1': 'U1PLI_7889',\r\n '2': 'W3Cecum_4858',\r\n '3': 'R27DLI_3243',\r\n }\r\n app = GenericRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)\r\n self.assertEqual(obs, exp)", "async def test_no_value_template(hass: HomeAssistant, calls) -> None:\n await _register_fan_sources(hass)\n\n with assert_setup_component(1, \"fan\"):\n test_fan_config = {\n \"preset_mode_template\": \"{{ states('input_select.preset_mode') }}\",\n \"percentage_template\": \"{{ states('input_number.percentage') }}\",\n \"oscillating_template\": \"{{ states('input_select.osc') }}\",\n \"direction_template\": \"{{ states('input_select.direction') }}\",\n \"turn_on\": [\n {\n \"service\": \"input_boolean.turn_on\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_on\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"turn_off\": [\n {\n \"service\": \"input_boolean.turn_off\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_off\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"set_preset_mode\": [\n {\n \"service\": \"input_select.select_option\",\n \"data_template\": {\n \"entity_id\": _PRESET_MODE_INPUT_SELECT,\n \"option\": \"{{ preset_mode }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_preset_mode\",\n \"caller\": \"{{ this.entity_id 
}}\",\n \"option\": \"{{ preset_mode }}\",\n },\n },\n ],\n \"set_percentage\": [\n {\n \"service\": \"input_number.set_value\",\n \"data_template\": {\n \"entity_id\": _PERCENTAGE_INPUT_NUMBER,\n \"value\": \"{{ percentage }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_value\",\n \"caller\": \"{{ this.entity_id }}\",\n \"value\": \"{{ percentage }}\",\n },\n },\n ],\n }\n assert await setup.async_setup_component(\n hass,\n \"fan\",\n {\"fan\": {\"platform\": \"template\", \"fans\": {\"test_fan\": test_fan_config}}},\n )\n\n await hass.async_block_till_done()\n await hass.async_start()\n await hass.async_block_till_done()\n\n await common.async_turn_on(hass, _TEST_FAN)\n _verify(hass, STATE_ON, 0, None, None, None)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, 0, None, None, None)\n\n percent = 100\n await common.async_set_percentage(hass, _TEST_FAN, percent)\n assert int(float(hass.states.get(_PERCENTAGE_INPUT_NUMBER).state)) == percent\n _verify(hass, STATE_ON, percent, None, None, None)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, percent, None, None, None)\n\n preset = \"auto\"\n await common.async_set_preset_mode(hass, _TEST_FAN, preset)\n assert hass.states.get(_PRESET_MODE_INPUT_SELECT).state == preset\n _verify(hass, STATE_ON, percent, None, None, preset)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, percent, None, None, preset)\n\n await common.async_set_direction(hass, _TEST_FAN, True)\n _verify(hass, STATE_OFF, percent, None, None, preset)\n\n await common.async_oscillate(hass, _TEST_FAN, True)\n _verify(hass, STATE_OFF, percent, None, None, preset)", "def test_all_providers(self, mock_provider, monkeypatch):\n\n def mock_providers():\n return ['mock']\n\n monkeypatch.setattr(notifiers, 'all_providers', mock_providers)\n\n assert 'mock' in notifiers.all_providers()", "def test_register_http_failure(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'random_max_len,value_profile,'\n strategy1.probability = 1\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_patch_hyperflex_proxy_setting_policy(self):\n pass", "def setUp(self):\n super().setUp()\n\n # Mock the call to the SAP SuccessFactors assertion endpoint\n SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp'\n\n 
def assertion_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')\n\n httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)\n\n SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp'\n\n def bad_callback(_request, _uri, headers):\n \"\"\"\n Return a 404 error when someone tries to call the URL.\n \"\"\"\n return (404, headers, 'NOT AN ASSERTION')\n\n httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback)\n\n # Mock the call to the SAP SuccessFactors token endpoint\n SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'\n\n def token_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'assertion=fake_saml_assertion' in _request.body\n assert b'company_id=NCC1701D' in _request.body\n assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, '{\"access_token\": \"faketoken\"}')\n\n httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)\n\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'email': '[email protected]',\n 'country': 'Australia',\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)", "def testServiceMapping_ByFactory(self):\n self.DoMappingTest({'/my-service': MyService.new_factory('new-value')})", "def testTestExpectationMap(self):\n self._StringToMapHelper(data_types.TestExpectationMap,\n data_types.ExpectationBuilderMap)", "def test_entities__FieldCustomization__set_value__1(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n assert u'Default time zone value 123' == fc.get_value(field, 'label')\n assert u'Default time zone value 123' == fc.query_value(field, 'label')", "def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": \"555\"}', response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/product_buy/?format=json\", data={\"price\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"product was bought\", response.content)\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn({\"account\": \"455\"}, response.content)", "def 
makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def test_startup_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # make sure some value isnt the default value\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n self.driver._protocol._param_dict.update(\"baz=30\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 30)\n\n # pretend to manually adjust a few things:\n self.driver._protocol._param_dict.update(\"foo=1000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 1000)\n self.driver._protocol._param_dict.update(\"bar=1500\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 1500)\n self.driver._protocol._param_dict.update(\"baz=2000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n\n # pretend to go through the motions of a startup sequence\n self.driver.set_init_params({'foo': 100, \"bar\": 150, \"baz\": 200})\n\n # Now a virtual method in the protocol that asserts when not implemented\n # behavior proven in derived protocol classes\n # self.driver.apply_startup_params()\n\n # check the values on the other end\n # running_config = self.driver._protocol.get_cached_config()\n\n # confirm that the default values were set back appropriately.\n # self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 100)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 150)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bat\"), 40)\n\n ##### Integration tests for startup config in the SBE37 integration suite", "def test_get_setting(monkeypatch):\n random_key = uuid()\n default_value, actual_value = \"foo\", \"bar\"\n assert helpers.get_setting(random_key, default_value) == default_value\n monkeypatch.setenv(random_key, actual_value)\n assert helpers.get_setting(random_key, default_value) == actual_value", "def test_handle_value_error(self, runway_context: MockRunwayContext) -> None:\n runway_context.add_stubber(\"ecr\")\n with pytest.raises(ValueError) as excinfo:\n EcrLookup.handle(\"unsupported\", runway_context)\n assert str(excinfo.value) == \"ecr lookup does not support 'unsupported'\"\n with pytest.raises(ValueError):\n EcrLookup.handle(\"unsupported::default=something\", runway_context)", "def test_setup(self):\n self.assertIsNotNone(getattr(self, 'original_good_practice_attrs', None))\n self.assertIsNotNone(getattr(self, 'original_get_overrides', None))\n self.assertIsNotNone(getattr(self, 'original_get_alt_field_info', None))\n self.assertIsNone(getattr(self.form, 'is_prepared', None))\n self.assertNotIn('good_practice_attrs', self.form.has_call)\n self.assertNotIn('get_overrides', self.form.has_call)\n self.assertNotIn('get_alt_field_info', self.form.has_call)\n good_practice = self.form.good_practice_attrs()\n if self.good_practice == 'empty':\n self.assertEqual({}, good_practice)\n overrides = self.form.get_overrides()\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual({}, overrides)\n elif self.overrides_empty_or_skip == 'skip':\n self.assertEqual(self.no_resize_override(), overrides)\n if self.alt_info == 'empty':\n self.assertEqual({}, self.form.get_alt_field_info())\n self.assertIn('get_alt_field_info', self.form.has_call)\n self.assertEqual(self.form.get_alt_field_info.__name__, 
'empty_get_alt_field_info')\n self.assertIn('good_practice_attrs', self.form.has_call)\n self.assertIn('get_overrides', self.form.has_call)\n self.form.has_call = []\n self.assertEqual(self.form.good_practice_attrs.__name__, 'empty_good_practice_attrs')\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual(self.form.get_overrides.__name__, 'empty_get_overrides')\n self.assertEqual(self.form.get_overrides.__name__, 'skip_get_overrides')\n request_type = 'POST' if self.get_initial_data() else 'GET'\n self.assertEqual(request_type, self.request.method)", "def test_call_alt_params(self):\r\n otu_picker = BlastOtuPicker({'max_e_value': 1e-30})\r\n expected = {}\r\n actual = otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected)\r\n\r\n self.otu_picker = BlastOtuPicker(\r\n {'max_e_value': 1e-3, 'Similarity': 0.90})\r\n expected_90 = {'ref1': ['s3', 's2', 's1'],\r\n 'ref2': ['s4'],\r\n 'ref3': ['s5'],\r\n 'ref4': ['s6']}\r\n actual = self.otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected_90)", "def test_function_values(self):\n\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_foo\",\n self.pick_byte2,\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=1,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_bar\",\n lambda x : bool(x&2), # bit map example\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=False,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n \n # check defaults just to be safe\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 1)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n result = self.param_dict.update(1005) # just change first in list\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 3)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # fn_bar does not get updated here\n result = self.param_dict.update_many(1205)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(len(result), 1)\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 4)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # both are updated now\n result = self.param_dict.update_many(6)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(result['fn_bar'], True)\n self.assertEqual(len(result), 2)\n \n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 0)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, True)", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def mocked_config_file_custom_provider():\n data = \"\"\"[YESSSSMS]\nLOGIN = 06501234567\nPASSWD = MySecre3tPassw0rd\nDEFAULT_TO = +43664123123123\n# MVNO = 
FANTASYMOBILE\n[YESSSSMS_PROVIDER_URLS]\nLOGIN_URL = mock://kontomanager.at/index.php\nLOGOUT_URL = mock://kontomanager.at/index.php?dologout=2\nKONTOMANAGER_URL = mock://kontomanager.at/kundendaten.php\nWEBSMS_FORM_URL = mock://kontomanager.at/websms.php\nSEND_SMS_URL = mock://kontomanager.at/websms_send.php\n\"\"\"\n with mock.patch(\n \"configparser.open\",\n # \"builtins.open\",\n mock.mock_open(read_data=data),\n ):\n yield", "def test_apply_startup_params(self):\n self.assertRaises(NotImplementedException,\n self.driver.apply_startup_params)", "def test_map_view_context_fields_values(\n self,\n mock_installation_statistics_model_overall_counts,\n mock_installation_statistics_model_data_per_period,\n mock_installation_statistics_model_timeline,\n mock_get_data_created_datetime_scope\n ): # pylint: disable=too-many-locals\n mock_timeline = ['2017-05-14', '2017-05-15', '2017-05-16']\n mock_students, mock_courses, mock_instances = [4124, 5122, 6412], [110, 211, 167], [30, 20, 25]\n mock_instances_count, mock_courses_count, mock_students_count, mock_certificates_count = 6412, 167, 25, 0\n mock_first_datetime_of_update_data = datetime(2017, 6, 1, 14, 56, 18)\n mock_last_datetime_of_update_data = datetime(2017, 7, 2, 23, 12, 8)\n\n mock_installation_statistics_model_timeline.return_value = mock_timeline\n\n mock_installation_statistics_model_data_per_period.return_value = mock_students, mock_courses, mock_instances\n\n mock_installation_statistics_model_overall_counts.return_value = {\n \"instances_count\": 6412,\n \"courses_count\": 167,\n \"students_count\": 25,\n \"generated_certificates_count\": 0,\n }\n\n mock_get_data_created_datetime_scope.return_value = \\\n mock_first_datetime_of_update_data, mock_last_datetime_of_update_data\n\n response = self.client.get('/')\n\n self.assertEqual(json.loads(response.context['timeline']), mock_timeline)\n self.assertEqual(json.loads(response.context['students']), mock_students)\n self.assertEqual(json.loads(response.context['courses']), mock_courses)\n self.assertEqual(json.loads(response.context['instances']), mock_instances)\n self.assertEqual(response.context['instances_count'], mock_instances_count)\n self.assertEqual(response.context['students_count'], mock_students_count)\n self.assertEqual(response.context['courses_count'], mock_courses_count)\n self.assertEqual(response.context['generated_certificates_count'], mock_certificates_count)\n self.assertEqual(response.context['first_datetime_of_update_data'], mock_first_datetime_of_update_data)\n self.assertEqual(response.context['last_datetime_of_update_data'], mock_last_datetime_of_update_data)", "def test_settings_proxy_properties_setting(parameters: Dict[str, Any]) -> None:\n settings = Settings()\n settings_proxy = settings.create_proxy()\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))\n\n for key, value in parameters.items():\n settings.__setattr__(key, value)\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))", "def test_config_device_init_with_defaults(get_config, monkeypatch):\n notbase_config = {'not_presented': 1}\n monkeypatch.setattr(DeviceConfig, 'minimal_essential_conf', 
notbase_config)\n cfg = get_config(DeviceConfig, base_config)\n\n assert isinstance(cfg, DeviceConfig), 'wrong class'\n assert cfg.data == notbase_config, 'bad config loaded'", "async def test_basic_setup(\n hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: ProtectCamera\n):\n\n camera_high_only = mock_camera.copy(deep=True)\n camera_high_only._api = mock_entry.api\n camera_high_only.channels[0]._api = mock_entry.api\n camera_high_only.channels[1]._api = mock_entry.api\n camera_high_only.channels[2]._api = mock_entry.api\n camera_high_only.name = \"Test Camera 1\"\n camera_high_only.id = \"test_high\"\n camera_high_only.channels[0].is_rtsp_enabled = True\n camera_high_only.channels[0].name = \"High\"\n camera_high_only.channels[0].rtsp_alias = \"test_high_alias\"\n camera_high_only.channels[1].is_rtsp_enabled = False\n camera_high_only.channels[2].is_rtsp_enabled = False\n\n camera_medium_only = mock_camera.copy(deep=True)\n camera_medium_only._api = mock_entry.api\n camera_medium_only.channels[0]._api = mock_entry.api\n camera_medium_only.channels[1]._api = mock_entry.api\n camera_medium_only.channels[2]._api = mock_entry.api\n camera_medium_only.name = \"Test Camera 2\"\n camera_medium_only.id = \"test_medium\"\n camera_medium_only.channels[0].is_rtsp_enabled = False\n camera_medium_only.channels[1].is_rtsp_enabled = True\n camera_medium_only.channels[1].name = \"Medium\"\n camera_medium_only.channels[1].rtsp_alias = \"test_medium_alias\"\n camera_medium_only.channels[2].is_rtsp_enabled = False\n\n camera_all_channels = mock_camera.copy(deep=True)\n camera_all_channels._api = mock_entry.api\n camera_all_channels.channels[0]._api = mock_entry.api\n camera_all_channels.channels[1]._api = mock_entry.api\n camera_all_channels.channels[2]._api = mock_entry.api\n camera_all_channels.name = \"Test Camera 3\"\n camera_all_channels.id = \"test_all\"\n camera_all_channels.channels[0].is_rtsp_enabled = True\n camera_all_channels.channels[0].name = \"High\"\n camera_all_channels.channels[0].rtsp_alias = \"test_high_alias\"\n camera_all_channels.channels[1].is_rtsp_enabled = True\n camera_all_channels.channels[1].name = \"Medium\"\n camera_all_channels.channels[1].rtsp_alias = \"test_medium_alias\"\n camera_all_channels.channels[2].is_rtsp_enabled = True\n camera_all_channels.channels[2].name = \"Low\"\n camera_all_channels.channels[2].rtsp_alias = \"test_low_alias\"\n\n camera_no_channels = mock_camera.copy(deep=True)\n camera_no_channels._api = mock_entry.api\n camera_no_channels.channels[0]._api = mock_entry.api\n camera_no_channels.channels[1]._api = mock_entry.api\n camera_no_channels.channels[2]._api = mock_entry.api\n camera_no_channels.name = \"Test Camera 4\"\n camera_no_channels.id = \"test_none\"\n camera_no_channels.channels[0].is_rtsp_enabled = False\n camera_no_channels.channels[0].name = \"High\"\n camera_no_channels.channels[1].is_rtsp_enabled = False\n camera_no_channels.channels[2].is_rtsp_enabled = False\n\n camera_package = mock_camera.copy(deep=True)\n camera_package._api = mock_entry.api\n camera_package.channels[0]._api = mock_entry.api\n camera_package.channels[1]._api = mock_entry.api\n camera_package.channels[2]._api = mock_entry.api\n camera_package.name = \"Test Camera 5\"\n camera_package.id = \"test_package\"\n camera_package.channels[0].is_rtsp_enabled = True\n camera_package.channels[0].name = \"High\"\n camera_package.channels[0].rtsp_alias = \"test_high_alias\"\n camera_package.channels[1].is_rtsp_enabled = False\n 
camera_package.channels[2].is_rtsp_enabled = False\n package_channel = camera_package.channels[0].copy(deep=True)\n package_channel.is_rtsp_enabled = False\n package_channel.name = \"Package Camera\"\n package_channel.id = 3\n package_channel.fps = 2\n package_channel.rtsp_alias = \"test_package_alias\"\n camera_package.channels.append(package_channel)\n\n mock_entry.api.bootstrap.cameras = {\n camera_high_only.id: camera_high_only,\n camera_medium_only.id: camera_medium_only,\n camera_all_channels.id: camera_all_channels,\n camera_no_channels.id: camera_no_channels,\n camera_package.id: camera_package,\n }\n await hass.config_entries.async_setup(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.CAMERA, 14, 6)\n\n # test camera 1\n entity_id = validate_default_camera_entity(hass, camera_high_only, 0)\n await validate_rtsps_camera_state(hass, camera_high_only, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_high_only, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_high_only, 0, entity_id)\n\n # test camera 2\n entity_id = validate_default_camera_entity(hass, camera_medium_only, 1)\n await validate_rtsps_camera_state(hass, camera_medium_only, 1, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_medium_only, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_medium_only, 1, entity_id)\n\n # test camera 3\n entity_id = validate_default_camera_entity(hass, camera_all_channels, 0)\n await validate_rtsps_camera_state(hass, camera_all_channels, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 0, entity_id)\n\n entity_id = validate_rtsps_camera_entity(hass, camera_all_channels, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsps_camera_state(hass, camera_all_channels, 1, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 1, entity_id)\n\n entity_id = validate_rtsps_camera_entity(hass, camera_all_channels, 2)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsps_camera_state(hass, camera_all_channels, 2, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 2)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 2, entity_id)\n\n # test camera 4\n entity_id = validate_default_camera_entity(hass, camera_no_channels, 0)\n await validate_no_stream_camera_state(\n hass, camera_no_channels, 0, entity_id, features=0\n )\n\n # test camera 5\n entity_id = validate_default_camera_entity(hass, camera_package, 0)\n await validate_rtsps_camera_state(hass, camera_package, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_package, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_package, 0, entity_id)\n\n entity_id = validate_default_camera_entity(hass, camera_package, 3)\n await validate_no_stream_camera_state(\n hass, camera_package, 3, entity_id, features=0\n )", "def setUp(self):\n 
super().setUp()\n\n self.provider = Provider.objects.filter(type=Provider.PROVIDER_OCP).first()\n\n ocp_metric = metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR\n ocp_source_type = Provider.PROVIDER_OCP\n tiered_rates = [{\"unit\": \"USD\", \"value\": 0.22}]\n self.ocp_data = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": ocp_source_type,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}, \"tiered_rates\": tiered_rates}],\n \"currency\": \"USD\",\n }\n self.basic_model = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}}],\n \"currency\": \"USD\",\n }", "async def test_form_multiple_services(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"errors\"] is None\n\n with patch(\"aussiebb.asyncio.AussieBB.__init__\", return_value=None), patch(\n \"aussiebb.asyncio.AussieBB.login\", return_value=True\n ), patch(\"aussiebb.asyncio.AussieBB.get_services\", return_value=FAKE_SERVICES):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n FAKE_DATA,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == RESULT_TYPE_FORM\n assert result2[\"step_id\"] == \"service\"\n assert result2[\"errors\"] is None\n\n with patch(\n \"homeassistant.components.aussie_broadband.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]]},\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result3[\"title\"] == TEST_USERNAME\n assert result3[\"data\"] == FAKE_DATA\n assert result3[\"options\"] == {\n CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]],\n }\n assert len(mock_setup_entry.mock_calls) == 1", "def requires_mapping(self):", "def default_load_all(cls, values): # noqa: N805\n if not any(values.values()):\n values[\"eia860\"] = Eia860Settings()\n values[\"eia861\"] = Eia861Settings()\n values[\"eia923\"] = Eia923Settings()\n\n return values", "def test_set_dict_value_3(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"C\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"C\")", "def used_implementation(self, impl: str, value):", "def test_real_value(self):\n setting_model = Setting(python_type='list', dry_value='')\n self.assertEqual(setting_model.value, [])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Lancelot,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Lancelot', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Robin,Patsy',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Robin', 'Patsy'])", "def test_input_identifier_types(self):\n # It's okay to set INPUT_IDENTIFIER_TYPES to None it means 
you\n # will cover any and all identifier types.\n class Base(IdentifierCoverageProvider):\n SERVICE_NAME = \"Test provider\"\n DATA_SOURCE_NAME = DataSource.GUTENBERG\n\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = None\n provider = MockProvider(self._db)\n assert None == provider.input_identifier_types\n\n # It's okay to set a single value.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = Identifier.ISBN\n provider = MockProvider(self._db)\n assert [Identifier.ISBN] == provider.input_identifier_types\n\n # It's okay to set a list of values.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = [Identifier.ISBN, Identifier.OVERDRIVE_ID]\n provider = MockProvider(self._db)\n assert ([Identifier.ISBN, Identifier.OVERDRIVE_ID] ==\n provider.input_identifier_types)\n\n # It's not okay to do nothing.\n class MockProvider(Base):\n pass\n with pytest.raises(ValueError) as excinfo:\n MockProvider(self._db)\n assert \"MockProvider must define INPUT_IDENTIFIER_TYPES, even if the value is None.\" in str(excinfo.value)", "def _verify_map(self):\n # ---- verify self.info ----\n if self.info['contype'] == NotImplemented:\n # 'contype' must be defined\n errstr = \"self.info['contype'] must be defined as:\\n\" \\\n \" 'motion', 'waveform', or 'power'\"\n raise NotImplementedError(errstr)\n elif self.info['contype'] not in ['motion',\n 'waveform',\n 'power']:\n # 'contype' must be one of specified type\n errstr = \"self.info['contype'] must be defined as:\\n\" \\\n \" 'motion', 'waveform', or 'power'\"\n raise NotImplementedError(errstr)\n\n # ---- verity self.configs ----\n # 'dataset fields' must be defined\n if 'dataset fields' not in self.configs:\n errstr = \"self.configs['dataset fields'] must be defined \" \\\n \"as:\\n [('field name', dtype), ]\"\n raise NotImplementedError(errstr)\n elif self.configs['dataset fields'] == NotImplemented:\n errstr = \"self.configs['dataset fields'] must be defined \" \\\n \"as:\\n [('field name', dtype), ]\"\n raise NotImplementedError(errstr)\n\n # 'dset field to numpy field' must be defined\n # - each 'dataset field' needs a mapping to a structured numpy\n # field for hdfReadControl\n # - 'dset field to numpy field' is a list of 3-element tuples\n # where each entry in the list corresponds to a dataset field\n # name\n # - the 3-element tuple must follow the format:\n #\n # self.configs['dset field to numpy field'][i] = (\n # str, # dataset field name\n # str, # corresponding structured numpy field name\n # int) # index of structured numpy field\n #\n # For example, the '6K Compumotor would look like...\n # self.configs['dset field to numpy'] = [\n # ('x', 'xyz', 0),\n # ('y', 'xyz', 1),\n # ('z', 'xyz', 2)]\n #\n key = 'dset field to numpy field'\n if key not in self.configs:\n raise NotImplementedError\n elif self.configs[key] == NotImplemented:\n raise NotImplementedError\n elif type(self.configs[key]) is not list:\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n elif not all(isinstance(val, tuple)\n for val in self.configs[key]):\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n elif not all(len(val) == 3 for val in self.configs[key]):\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n else:\n err = False\n dset_fields = [name\n for name, dftype\n in self.configs['dataset fields']]\n for dfname, npfname, 
npi in self.configs[key]:\n if dfname not in dset_fields:\n err = True\n break\n elif type(npfname) is not str:\n err = True\n break\n elif type(npi) is not int:\n err = True\n break\n if err:\n errstr = \"self.configs['dset field to numpy field] \" \\\n + \"must be a list of 3-element tuples\"\n raise Exception(errstr)\n\n # contype == 'motion' specific verification\n if self.contype == 'motion':\n # verify 'motion list'\n if 'motion list' not in self.configs:\n # 'motion list' exists\n errstr = \"self.configs['motion list'] must be defined\"\n raise NotImplementedError(errstr)\n elif self.configs['motion list'] == NotImplemented:\n # 'motion list' is defined\n errstr = \"self.configs['motion list'] must be defined\"\n raise NotImplementedError(errstr)\n else:\n # each 'motion list' must have its own config\n for name in self.configs['motion list']:\n if name not in self.configs:\n errstr = \"must defined self.configs['motion \" \\\n \"name'] for each motion list in \" \\\n \"self.configs['motion list'] = \" \\\n \"[motion name, ]\"\n raise NotImplementedError(errstr)\n\n # verify 'probe list'\n if 'probe list' not in self.configs:\n # 'probe list' exists\n errstr = \"self.configs['probe list'] must be defined\"\n raise NotImplementedError(errstr)\n elif self.configs['probe list'] == NotImplemented:\n # 'probe list' is defined\n errstr = \"self.configs['probe list'] must be defined\"\n raise NotImplementedError(errstr)\n else:\n # each 'probe list' must have its own config\n for name in self.configs['probe list']:\n if name not in self.configs:\n errstr = \"must defined self.configs['probe \" \\\n \"name'] for each probe in \" \\\n \"self.configs['probe list'] = \" \\\n \"[probe name, ]\"\n raise NotImplementedError(errstr)\n\n # delete 'config names' if present\n if 'config names' in self.configs:\n del self.configs['config names']\n\n # verify all other contypes\n if self.contype != 'motion':\n # remove 'motion list'\n if 'motion list' in self.configs:\n # remove 'motion list' children\n for name in self.configs['motion list']:\n if name in self.configs:\n del(self.configs[name])\n\n # remove 'motion list'\n del(self.configs['motion list'])\n\n # remove 'probe list'\n if 'probe list' in self.configs:\n # remove 'probe list' children\n for name in self.configs['probe list']:\n if name in self.configs:\n del (self.configs[name])\n\n # remove 'motion list'\n del (self.configs['probe list'])\n\n # verify 'command list'\n # if 'command list' not in self.configs:\n # # 'command list' exists\n # errstr = \"self.configs['command list'] must be \" \\\n # \"defined\"\n # raise NotImplementedError(errstr)\n # elif self.configs['command list'] == NotImplemented:\n # # 'motion list' is defined\n # errstr = \"self.configs['command list'] must be \" \\\n # \"defined\"\n # raise NotImplementedError(errstr)", "def test__authenticate_with_custom_port(self):\n get_cloud_config_mock = [\n \"proxmox.connection.url\",\n \"9999\",\n \"fakeuser\",\n \"secretpassword\",\n True,\n ]\n requests_post_mock = MagicMock()\n with patch(\n \"salt.config.get_cloud_config_value\",\n autospec=True,\n side_effect=get_cloud_config_mock,\n ), patch(\"requests.post\", requests_post_mock):\n proxmox._authenticate()\n requests_post_mock.assert_called_with(\n \"https://proxmox.connection.url:9999/api2/json/access/ticket\",\n verify=True,\n data={\"username\": (\"fakeuser\",), \"password\": \"secretpassword\"},\n )", "def test_prep_overrides(self):\n original_data = self.form.data\n test_data = original_data.copy()\n 
test_data._mutable = False\n self.form.data = test_data # copied only to allow tear-down reverting to original.\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n self.form.fields = test_fields # copied to allow tear-down reverting to original.\n original_get_overrides = self.form.get_overrides\n def replace_overrides(): return self.formfield_attrs_overrides\n self.form.get_overrides = replace_overrides\n original_alt_field_info = getattr(self.form, 'alt_field_info', None)\n self.form.alt_field_info = {}\n overrides = self.formfield_attrs_overrides.copy()\n DEFAULT = overrides.pop('_default_')\n expected_attrs = {}\n for name, field in test_fields.items():\n attrs = field.widget.attrs.copy()\n if isinstance(field.widget, (RadioSelect, CheckboxSelectMultiple, CheckboxInput, )):\n pass # update if similar section in prep_fields is updated.\n attrs.update(overrides.get(name, {}))\n # TODO: setup structure for using default or defined version for all CharFields.\n no_resize = overrides.get(name, {}).pop('no_size_override', False)\n no_resize = True if isinstance(field.widget, (HiddenInput, MultipleHiddenInput)) else no_resize\n if no_resize:\n expected_attrs[name] = attrs\n continue # None of the following size overrides are applied for this field.\n if isinstance(field.widget, Textarea):\n width_attr_name = 'cols'\n default = DEFAULT.get('cols', None)\n display_size = attrs.get('cols', None)\n if 'rows' in DEFAULT:\n height = attrs.get('rows', None)\n height = min((DEFAULT['rows'], int(height))) if height else DEFAULT['rows']\n attrs['rows'] = str(height)\n if default: # For textarea, we always override. The others depend on different conditions.\n display_size = display_size or default\n display_size = min((int(display_size), int(default)))\n elif issubclass(field.__class__, CharField):\n width_attr_name = 'size' # 'size' is only valid for input types: email, password, tel, text\n default = DEFAULT.get('size', None) # Cannot use float(\"inf\") as an int.\n display_size = attrs.get('size', None)\n else: # This field does not have a size setting.\n width_attr_name, default, display_size = None, None, None\n input_size = attrs.get('maxlength', None)\n possible_size = [int(ea) for ea in (display_size or default, input_size) if ea]\n # attrs['size'] = str(int(min(float(display_size), float(input_size)))) # Can't use float(\"inf\") as an int.\n if possible_size and width_attr_name:\n attrs[width_attr_name] = str(min(possible_size))\n expected_attrs[name] = attrs\n # Expected:\n # formfield_attrs_overrides = {\n # '_default_': {'size': 15, 'cols': 20, 'rows': 4, },\n # 'first': {'maxlength': 191, 'size': 20, },\n # 'second': {'maxlength': 2, }, # 'size': 2,\n # 'last': {'maxlength': 2, 'size': 5, },\n # }\n result_fields = self.form.prep_fields()\n result_attrs = {name: field.widget.attrs.copy() for name, field in result_fields.items()}\n first_maxlength = expected_attrs['first']['maxlength'] # overrides['first']['maxlength']\n first_size = expected_attrs['first']['size'] # overrides['first']['size']\n second_maxlength = expected_attrs['second']['maxlength'] # overrides['second']['maxlength']\n last_maxlength = expected_attrs['last']['maxlength'] # overrides['last']['maxlength']\n last_size = expected_attrs['last']['size'] # overrides['last']['size']\n\n self.assertEqual(first_maxlength, result_fields['first'].widget.attrs.get('maxlength', None))\n self.assertEqual(first_size, result_fields['first'].widget.attrs.get('size', None))\n self.assertEqual(second_maxlength, 
result_fields['second'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_maxlength, result_fields['last'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_size, result_fields['last'].widget.attrs.get('size', None))\n for key, val in expected_attrs.items():\n self.assertEqual(val, result_attrs[key])\n self.assertDictEqual(expected_attrs, result_attrs)\n\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n self.form.get_overrides = original_get_overrides", "def test_function_callparams(self):\n\n @Configurable(conf=category('', Parameter('test', value=True)))\n def twist(test=None):\n return test\n\n value = twist()\n\n self.assertTrue(value)\n\n value = twist(False)\n\n self.assertFalse(value)\n\n Configurable.get_annotations(twist)[0].conf['']['test'].value = 1\n\n value = twist()\n\n self.assertEqual(value, 1)", "def test_bad_ext_app_setting(self, mock_settings):\n lang, _ = Language.objects.get_or_create(name=\"Test Language\", code=\"text-x-lang\")\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": None})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": {}})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": ()})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": \" \"})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1.0})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)", "async def test_plenticore_async_setup_g1(\n hass: HomeAssistant,\n mock_config_entry: MockConfigEntry,\n mock_apiclient: ApiClient,\n) -> None:\n mock_apiclient.get_settings = AsyncMock(\n return_value={\"scb:network\": [SettingsData({\"id\": \"Hostname\"})]}\n )\n mock_apiclient.get_setting_values = AsyncMock(\n # G1 model has the entry id \"Hostname\"\n return_value={\n \"devices:local\": {\n \"Properties:SerialNo\": \"12345\",\n \"Branding:ProductName1\": \"PLENTICORE\",\n \"Branding:ProductName2\": \"plus 10\",\n \"Properties:VersionIOC\": \"01.45\",\n \"Properties:VersionMC\": \"01.46\",\n },\n \"scb:network\": {\"Hostname\": \"scb\"},\n }\n )\n\n mock_config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n plenticore = hass.data[DOMAIN][mock_config_entry.entry_id]\n\n assert plenticore.device_info == DeviceInfo(\n configuration_url=\"http://192.168.1.2\",\n identifiers={(DOMAIN, \"12345\")},\n manufacturer=\"Kostal\",\n model=\"PLENTICORE plus 10\",\n name=\"scb\",\n sw_version=\"IOC: 01.45 MC: 01.46\",\n )", "def default_load_all(cls, values): # noqa: N805\n 
if not any(values.values()):\n values[\"eia\"] = EiaSettings()\n values[\"epacems\"] = EpaCemsSettings()\n values[\"ferc1\"] = Ferc1Settings()\n values[\"ferc714\"] = Ferc714Settings()\n values[\"glue\"] = GlueSettings()\n\n return values", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {0: ['cdhit_test_seqs_0'],\r\n 1: ['cdhit_test_seqs_1'],\r\n 2: ['cdhit_test_seqs_2'],\r\n 3: ['cdhit_test_seqs_3'],\r\n 4: ['cdhit_test_seqs_4'],\r\n 5: ['cdhit_test_seqs_5'],\r\n 6: ['cdhit_test_seqs_6'],\r\n 7: ['cdhit_test_seqs_7'],\r\n 8: ['cdhit_test_seqs_8'],\r\n 9: ['cdhit_test_seqs_9']}\r\n\r\n app = CdHitOtuPicker(params={})\r\n obs = app(self.tmp_seq_filepath1)\r\n self.assertEqual(obs, exp)", "def test_get_user_settings(client, jwt, session, keycloak_mock, monkeypatch): # pylint:disable=unused-argument\n user_model = factory_user_model(user_info=TestUserInfo.user_test)\n contact = factory_contact_model()\n contact_link = ContactLinkModel()\n contact_link.contact = contact\n contact_link.user = user_model\n contact_link.commit()\n kc_id = user_model.keycloak_guid\n\n claims = copy.deepcopy(TestJwtClaims.updated_test.value)\n claims['sub'] = str(kc_id)\n claims['idp_userid'] = str(user_model.idp_userid)\n patch_token_info(claims, monkeypatch)\n\n OrgService.create_org(TestOrgInfo.org_branch_name, user_id=user_model.id)\n\n # post token with updated claims\n headers = factory_auth_header(jwt=jwt, claims=claims)\n rv = client.get(f'/api/v1/users/{kc_id}/settings', headers=headers, content_type='application/json')\n item_list = rv.json\n account = next(obj for obj in item_list if obj['type'] == 'ACCOUNT')\n assert account['accountType'] == 'BASIC'\n assert account['additionalLabel'] == TestOrgInfo.org_branch_name.get('branchName')\n assert rv.status_code == http_status.HTTP_200_OK\n assert schema_utils.validate(item_list, 'user_settings_response')[0]\n assert account['productSettings'] == f'/account/{account[\"id\"]}/restricted-product'\n\n kc_id_no_user = TestUserInfo.user1.get('keycloak_guid')\n claims = copy.deepcopy(TestJwtClaims.updated_test.value)\n claims['sub'] = str(kc_id_no_user)\n patch_token_info(claims, monkeypatch)\n # post token with updated claims\n headers = factory_auth_header(jwt=jwt, claims=claims)\n rv = client.get(f'/api/v1/users/{kc_id_no_user}/settings', headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_200_OK\n assert schema_utils.validate(item_list, 'user_settings_response')[0]\n item_list = rv.json\n account = next((obj for obj in item_list if obj['type'] == 'ACCOUNT'), None)\n assert account is None\n user_profile = next(obj for obj in item_list if obj['type'] == 'USER_PROFILE')\n assert '/userprofile' in user_profile.get('urlpath')", "async def test_setup_multiple(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n bootstrap: Bootstrap,\n) -> None:\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n\n assert ufp.entry.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert ufp.entry.unique_id == ufp.api.bootstrap.nvr.mac\n\n nvr = bootstrap.nvr\n nvr._api = ufp.api\n nvr.mac = \"A1E00C826983\"\n nvr.id\n ufp.api.get_nvr = AsyncMock(return_value=nvr)\n\n with patch(\n \"homeassistant.components.unifiprotect.utils.ProtectApiClient\"\n ) as mock_api:\n mock_config = MockConfigEntry(\n domain=DOMAIN,\n data={\n \"host\": \"1.1.1.1\",\n \"username\": \"test-username\",\n \"password\": 
\"test-password\",\n \"id\": \"UnifiProtect\",\n \"port\": 443,\n \"verify_ssl\": False,\n },\n version=2,\n )\n mock_config.add_to_hass(hass)\n\n mock_api.return_value = ufp.api\n\n await hass.config_entries.async_setup(mock_config.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert mock_config.unique_id == ufp.api.bootstrap.nvr.mac", "def test_call_default_params(self):\r\n\r\n exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),\r\n 'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),\r\n '2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),\r\n 'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),\r\n }\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath,\r\n self.tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)", "def test_sanity(self, mock_provider):\n p = mock_provider()\n assert p.metadata == {'base_url': 'https://api.mock.com',\n 'provider_name': 'mock_provider',\n 'site_url': 'https://www.mock.com'}\n assert p.arguments == {\n 'not_required': {\n 'oneOf': [\n {'items': {'type': 'string'}, 'minItems': 1, 'type': 'array', 'uniqueItems': True},\n {'type': 'string'}\n ]\n },\n 'required': {'type': 'string'},\n 'message': {'type': 'string'},\n 'option_with_default': {'type': 'string'}\n }\n\n assert p.required == ['required']\n rsp = p.notify(**self.valid_data)\n assert isinstance(rsp, Response)\n assert not rsp.errors\n assert rsp.raise_on_errors() is None\n assert repr(rsp) == '<Response,provider=Mock_provider,status=success>'\n assert repr(p) == '<Provider:[Mock_provider]>'", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'corpus_subset,'\n strategy1.probability = 1\n strategy1.engine = 'afl'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_entities__FieldCustomization__default_value__1(address_book):\n fc = IFieldCustomization(address_book)\n assert u'Time zone' == fc.default_value(IAddressBook['time_zone'], 'label')", "def test_get_setting(monkeypatch):\n resp = str(uuid.uuid4())\n arg = str(uuid.uuid4())\n kwarg = str(uuid.uuid4())\n get_secret = Mock(return_value=resp)\n monkeypatch.setattr(\"lambdautils.state.get_secret\", get_secret)\n resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)\n assert resp2 == resp\n get_secret.assert_called_with(arg, kwarg=kwarg)", "def _validate_provider(self, name_or_uuid, **kwargs):\n found = self.client._provider_tree.data(name_or_uuid)\n # If kwargs provided, their names indicate ProviderData attributes\n for attr, expected in kwargs.items():\n try:\n self.assertEqual(getattr(found, attr), expected)\n except AttributeError:\n self.fail(\"Provider with name or UUID %s doesn't have \"\n \"attribute %s (expected value: %s)\" %\n (name_or_uuid, attr, expected))", "def valid_config(hass: HomeAssistant, requests_mock):\n requests_mock.get(\n \"https://api.vultr.com/v1/account/info?api_key=ABCDEFG1234567\",\n text=load_fixture(\"account_info.json\", \"vultr\"),\n )\n\n with patch(\n \"vultr.Vultr.server_list\",\n 
return_value=json.loads(load_fixture(\"server_list.json\", \"vultr\")),\n ):\n # Setup hub\n vultr.setup(hass, VALID_CONFIG)", "def test_set(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n self.assertIsNone(\n ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )\n\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 1)\n obj = AppSetting.objects.get(name=setting_name, user=self.user)\n self.assertEqual(obj.get_value(), 'value')\n self.assertIsNone(\n ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )", "def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def test_patch_hyperflex_ucsm_config_policy(self):\n pass", "def test_claims_supported_not_set(self):\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], [])", "def test_bad_custom_params(self):\r\n bad_custom_params = ['test_custom_params: test_custom_param_value']\r\n self.xmodule.custom_parameters = bad_custom_params\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n with self.assertRaises(LTIError):\r\n self.xmodule.get_input_fields()", "def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }", "def test_default_customer_app_map_search(self):\n pass", "def test__VerificationFieldPlatform__value():\n for instance in VerificationFieldPlatform.INSTANCES.values():\n vampytest.assert_instance(instance.value, VerificationFieldPlatform.VALUE_TYPE)", "def test_inputs_suitor_prefs(resident_names, hospital_names, capacities, seed):\n\n _, _, match = _make_match(resident_names, hospital_names, capacities, seed)\n\n assert match._check_suitor_prefs()\n\n match.suitors[0].pref_names = [1, 2, 3]\n\n with pytest.raises(Exception):\n match._check_suitor_prefs()" ]
[ "0.6940789", "0.6617741", "0.6345586", "0.57743275", "0.5698771", "0.56804734", "0.55835336", "0.5381514", "0.5341755", "0.5321272", "0.5309637", "0.5242255", "0.5205009", "0.5200889", "0.5184601", "0.51705694", "0.51325846", "0.5132514", "0.5125601", "0.51126003", "0.50996953", "0.509318", "0.5084065", "0.5074553", "0.5061046", "0.50475365", "0.5037039", "0.5034832", "0.5031776", "0.50172603", "0.501643", "0.5013731", "0.50090945", "0.5001902", "0.4999957", "0.4997367", "0.49963045", "0.4990176", "0.49847022", "0.49797472", "0.4966269", "0.4962959", "0.49531898", "0.4952341", "0.49419162", "0.49218944", "0.4907446", "0.49002072", "0.48969236", "0.48909447", "0.48889926", "0.4888356", "0.4887152", "0.48866668", "0.48839447", "0.48830232", "0.4879972", "0.4878541", "0.4876839", "0.4876839", "0.4866257", "0.48644382", "0.48599505", "0.48557085", "0.4852876", "0.4852096", "0.48510453", "0.48420906", "0.4838144", "0.4836454", "0.48361173", "0.48254094", "0.4825269", "0.48243293", "0.48197612", "0.48164678", "0.4814834", "0.48147961", "0.48140416", "0.48125505", "0.48095378", "0.48059368", "0.48036745", "0.4803195", "0.4802615", "0.47988525", "0.47969177", "0.47951865", "0.4793651", "0.47849283", "0.47826838", "0.47821727", "0.47805625", "0.47737014", "0.47730505", "0.47687742", "0.4760722", "0.47584417", "0.47519702", "0.47517416" ]
0.70560694
0
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mapping overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone).
def test_register_sapsf_metadata_present_empty_value_override(self): value_map = {'country': {}} expected_country = 'AU' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', } if value_map: provider_settings['sapsf_value_mappings'] = value_map self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps(provider_settings) ) self._test_register(country=expected_country)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_register_sapsf_with_value_default(self):\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,country,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'country': 'Australia'\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)\n\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings),\n default_email='[email protected]'\n )\n self.USER_EMAIL = '[email protected]'\n self._test_register()", "def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n 
kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"value\": \"There is a product bought\"}',\n response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/?format=json\", data={\"PLN\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": \"100\"}', response.content)\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn('{\"PLN\": \"100\"}', response.content)", "def test_mocked_api_set_new_value(self):\n c = Client()\n response = c.get(self.patch_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 157}',\n response.content)\n response = c.patch(\n self.patch_url, data={\"PLN\": 20, \"EURO\": 20})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)\n response = c.get(self.patch_url)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)", "def test_entities__FieldCustomization__set_value__2(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')", "def set_provider_defaults(provider: str, config: Dict[str, Any]) -> None:\n\n class SetProviderDefaults:\n @st.hookimpl\n def get_provider_config(self, name, params, registry):\n if name != provider:\n return None\n conf = config.copy()\n conf.update(params)\n return conf\n\n st.registry.register(SetProviderDefaults())", "def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"", "def test_entities__NoFieldCustomization__set_value__1(root_folder):\n nfc = IFieldCustomization(root_folder)\n with pytest.raises(NotImplementedError):\n nfc.set_value(IAddressBook['time_zone'], u'label', u'foo')", "def setup_provider(self):\n pass", "def test_raises_set_alt_data(self):\n name, value = 
'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)", "def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)", "def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def test_configure():\n\n configs = DTO()\n configs.update(fake_name='fake_name', fake_id='fake_id',\n fake_number=33, fake_value='fake_value')\n application_services.configure(configs)\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in\n ['FAKE_NAME', 'FAKE_ID', 'FAKE_NUMBER', 'FAKE_VALUE'])\n\n assert not any(name in app_configs for name in\n ['fake_name', 'fake_id', 'fake_number', 'fake_value'])", "def setUp(self):\n super().setUp()\n self.mock_requests(get_geocode_data=copy.deepcopy(test_constants.GET_ADDRESS_CANDIDATES_API_MOCK), \n get_data=copy.deepcopy(test_constants.GET_LIBRARY_API_MOCK))", "def mock_config(monkeypatch: Any) -> None:\n monkeypatch.setattr(\"catapi.config.ENABLE_FOO\", \"true\")\n monkeypatch.setattr(\"catapi.config.ENABLE_BAR\", \"false\")", "def test_force_override(self):\n DummyLoader.register()\n try:\n DummyLoader.register(override=True)\n except ValueError:\n self.fail('Can not register if passing `override` set to `True`.')", "def test_provided_data_takes_precedence_over_environ(self, mock_provider, monkeypatch):\n p = mock_provider()\n prefix = f'mock_'\n monkeypatch.setenv(f'{prefix}{p.provider_name}_required'.upper(), 'foo')\n rsp = p.notify(required='bar', env_prefix=prefix)\n assert rsp.status == 'success'\n assert rsp.data['required'] == 'bar'", "def test_resolution_set_successful(self):\n # We create an instance of the 
panel so we can check existing values\n panel = ResolutionAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['resolution'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'resolution',\n ','.join(self.new['resolution']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['resolution'])", "def test_update_reg_ex_config(self):\n pass", "def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)", "def test_value_base_metadata_param(self):\n value = { 'color': 'blue' }\n base_meta = BaseMetadata(api_client=self.IDS_SYS_CLIENT, value=value)\n self.assertEqual(base_meta.value, value)", "def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_set_dict_value_2(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"B\")", "def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"", "def test_country_overrides(self):\n # Retrieve the registration form description\n with override_settings(REGISTRATION_EXTRA_FIELDS={\"country\": \"required\"}):\n response = self.client.get(self.url)\n self.assertHttpOK(response)\n\n self.assertContains(response, 'Kosovo')", "def testMapSetdefault(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap()\n with self.assertRaises(AssertionError):\n m.setdefault(1, data_types.BuildStats())\n with self.assertRaises(AssertionError):\n m.setdefault('1', 2)\n m.setdefault('1', data_types.BuildStats())\n self.assertEqual(m, {'1': data_types.BuildStats()})", "def __init__(self, allow_replace=False):\n self.providers = {}\n self.allow_replace = allow_replace", "def defaults_provider():\n return getattr(defaults_provider, 'overrides', {})", "def test_update_when_value_is_none(self, mock_req):\n self.setup_api(None, mock_req)\n for name in self.sensor_dict:\n sensor = 
self.sensor_dict[name][\"sensor\"]\n self.fake_delay(2)\n sensor.update()\n assert sensor.state is None", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def test_patch_user_identity_mapping(self):\n pass", "def test_good_custom_params(self):\r\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n self.xmodule.get_input_fields()\r\n self.xmodule.oauth_params.assert_called_with(\r\n {u'custom_test_custom_params': u'test_custom_param_value'},\r\n 'test_client_key', 'test_client_secret'\r\n )", "def test_good_custom_params(self):\n self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\n self.xmodule.oauth_params = Mock()\n self.xmodule.get_input_fields()\n self.xmodule.oauth_params.assert_called_with(\n {'custom_test_custom_params': 'test_custom_param_value'},\n 'test_client_key', 'test_client_secret'\n )", "def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def test_entities__FieldCustomization__set_value__3(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {'0': 'R27DLI_4812',\r\n '1': 'U1PLI_7889',\r\n '2': 'W3Cecum_4858',\r\n '3': 'R27DLI_3243',\r\n }\r\n app = GenericRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)\r\n self.assertEqual(obs, exp)", "async def test_no_value_template(hass: HomeAssistant, calls) -> None:\n await _register_fan_sources(hass)\n\n with assert_setup_component(1, \"fan\"):\n test_fan_config = {\n \"preset_mode_template\": \"{{ states('input_select.preset_mode') }}\",\n \"percentage_template\": \"{{ states('input_number.percentage') }}\",\n \"oscillating_template\": \"{{ states('input_select.osc') }}\",\n \"direction_template\": \"{{ states('input_select.direction') }}\",\n \"turn_on\": [\n {\n \"service\": \"input_boolean.turn_on\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_on\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"turn_off\": [\n {\n \"service\": \"input_boolean.turn_off\",\n \"entity_id\": _STATE_INPUT_BOOLEAN,\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"turn_off\",\n \"caller\": \"{{ this.entity_id }}\",\n },\n },\n ],\n \"set_preset_mode\": [\n {\n \"service\": \"input_select.select_option\",\n \"data_template\": {\n \"entity_id\": _PRESET_MODE_INPUT_SELECT,\n \"option\": \"{{ preset_mode }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_preset_mode\",\n 
\"caller\": \"{{ this.entity_id }}\",\n \"option\": \"{{ preset_mode }}\",\n },\n },\n ],\n \"set_percentage\": [\n {\n \"service\": \"input_number.set_value\",\n \"data_template\": {\n \"entity_id\": _PERCENTAGE_INPUT_NUMBER,\n \"value\": \"{{ percentage }}\",\n },\n },\n {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"action\": \"set_value\",\n \"caller\": \"{{ this.entity_id }}\",\n \"value\": \"{{ percentage }}\",\n },\n },\n ],\n }\n assert await setup.async_setup_component(\n hass,\n \"fan\",\n {\"fan\": {\"platform\": \"template\", \"fans\": {\"test_fan\": test_fan_config}}},\n )\n\n await hass.async_block_till_done()\n await hass.async_start()\n await hass.async_block_till_done()\n\n await common.async_turn_on(hass, _TEST_FAN)\n _verify(hass, STATE_ON, 0, None, None, None)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, 0, None, None, None)\n\n percent = 100\n await common.async_set_percentage(hass, _TEST_FAN, percent)\n assert int(float(hass.states.get(_PERCENTAGE_INPUT_NUMBER).state)) == percent\n _verify(hass, STATE_ON, percent, None, None, None)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, percent, None, None, None)\n\n preset = \"auto\"\n await common.async_set_preset_mode(hass, _TEST_FAN, preset)\n assert hass.states.get(_PRESET_MODE_INPUT_SELECT).state == preset\n _verify(hass, STATE_ON, percent, None, None, preset)\n\n await common.async_turn_off(hass, _TEST_FAN)\n _verify(hass, STATE_OFF, percent, None, None, preset)\n\n await common.async_set_direction(hass, _TEST_FAN, True)\n _verify(hass, STATE_OFF, percent, None, None, preset)\n\n await common.async_oscillate(hass, _TEST_FAN, True)\n _verify(hass, STATE_OFF, percent, None, None, preset)", "def test_all_providers(self, mock_provider, monkeypatch):\n\n def mock_providers():\n return ['mock']\n\n monkeypatch.setattr(notifiers, 'all_providers', mock_providers)\n\n assert 'mock' in notifiers.all_providers()", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'random_max_len,value_profile,'\n strategy1.probability = 1\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_register_http_failure(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def test_patch_hyperflex_proxy_setting_policy(self):\n pass", "def setUp(self):\n super().setUp()\n\n # Mock the call to the SAP SuccessFactors assertion endpoint\n SAPSF_ASSERTION_URL = 
'http://successfactors.com/oauth/idp'\n\n def assertion_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')\n\n httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)\n\n SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp'\n\n def bad_callback(_request, _uri, headers):\n \"\"\"\n Return a 404 error when someone tries to call the URL.\n \"\"\"\n return (404, headers, 'NOT AN ASSERTION')\n\n httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback)\n\n # Mock the call to the SAP SuccessFactors token endpoint\n SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'\n\n def token_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'assertion=fake_saml_assertion' in _request.body\n assert b'company_id=NCC1701D' in _request.body\n assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, '{\"access_token\": \"faketoken\"}')\n\n httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)\n\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'email': '[email protected]',\n 'country': 'Australia',\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)", "def testServiceMapping_ByFactory(self):\n self.DoMappingTest({'/my-service': MyService.new_factory('new-value')})", "def testTestExpectationMap(self):\n self._StringToMapHelper(data_types.TestExpectationMap,\n data_types.ExpectationBuilderMap)", "def test_entities__FieldCustomization__set_value__1(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n assert u'Default time zone value 123' == fc.get_value(field, 'label')\n assert u'Default time zone value 123' == fc.query_value(field, 'label')", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": \"555\"}', response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/product_buy/?format=json\", data={\"price\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"product was bought\", response.content)\n response = 
c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn({\"account\": \"455\"}, response.content)", "def test_startup_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # make sure some value isnt the default value\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n self.driver._protocol._param_dict.update(\"baz=30\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 30)\n\n # pretend to manually adjust a few things:\n self.driver._protocol._param_dict.update(\"foo=1000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 1000)\n self.driver._protocol._param_dict.update(\"bar=1500\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 1500)\n self.driver._protocol._param_dict.update(\"baz=2000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n\n # pretend to go through the motions of a startup sequence\n self.driver.set_init_params({'foo': 100, \"bar\": 150, \"baz\": 200})\n\n # Now a virtual method in the protocol that asserts when not implemented\n # behavior proven in derived protocol classes\n # self.driver.apply_startup_params()\n\n # check the values on the other end\n # running_config = self.driver._protocol.get_cached_config()\n\n # confirm that the default values were set back appropriately.\n # self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 100)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 150)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bat\"), 40)\n\n ##### Integration tests for startup config in the SBE37 integration suite", "def test_get_setting(monkeypatch):\n random_key = uuid()\n default_value, actual_value = \"foo\", \"bar\"\n assert helpers.get_setting(random_key, default_value) == default_value\n monkeypatch.setenv(random_key, actual_value)\n assert helpers.get_setting(random_key, default_value) == actual_value", "def test_handle_value_error(self, runway_context: MockRunwayContext) -> None:\n runway_context.add_stubber(\"ecr\")\n with pytest.raises(ValueError) as excinfo:\n EcrLookup.handle(\"unsupported\", runway_context)\n assert str(excinfo.value) == \"ecr lookup does not support 'unsupported'\"\n with pytest.raises(ValueError):\n EcrLookup.handle(\"unsupported::default=something\", runway_context)", "def test_setup(self):\n self.assertIsNotNone(getattr(self, 'original_good_practice_attrs', None))\n self.assertIsNotNone(getattr(self, 'original_get_overrides', None))\n self.assertIsNotNone(getattr(self, 'original_get_alt_field_info', None))\n self.assertIsNone(getattr(self.form, 'is_prepared', None))\n self.assertNotIn('good_practice_attrs', self.form.has_call)\n self.assertNotIn('get_overrides', self.form.has_call)\n self.assertNotIn('get_alt_field_info', self.form.has_call)\n good_practice = self.form.good_practice_attrs()\n if self.good_practice == 'empty':\n self.assertEqual({}, good_practice)\n overrides = self.form.get_overrides()\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual({}, overrides)\n elif self.overrides_empty_or_skip == 'skip':\n self.assertEqual(self.no_resize_override(), overrides)\n if self.alt_info == 'empty':\n self.assertEqual({}, self.form.get_alt_field_info())\n self.assertIn('get_alt_field_info', self.form.has_call)\n 
self.assertEqual(self.form.get_alt_field_info.__name__, 'empty_get_alt_field_info')\n self.assertIn('good_practice_attrs', self.form.has_call)\n self.assertIn('get_overrides', self.form.has_call)\n self.form.has_call = []\n self.assertEqual(self.form.good_practice_attrs.__name__, 'empty_good_practice_attrs')\n if self.overrides_empty_or_skip == 'empty':\n self.assertEqual(self.form.get_overrides.__name__, 'empty_get_overrides')\n self.assertEqual(self.form.get_overrides.__name__, 'skip_get_overrides')\n request_type = 'POST' if self.get_initial_data() else 'GET'\n self.assertEqual(request_type, self.request.method)", "def test_call_alt_params(self):\r\n otu_picker = BlastOtuPicker({'max_e_value': 1e-30})\r\n expected = {}\r\n actual = otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected)\r\n\r\n self.otu_picker = BlastOtuPicker(\r\n {'max_e_value': 1e-3, 'Similarity': 0.90})\r\n expected_90 = {'ref1': ['s3', 's2', 's1'],\r\n 'ref2': ['s4'],\r\n 'ref3': ['s5'],\r\n 'ref4': ['s6']}\r\n actual = self.otu_picker(self.seqs_fp,\r\n refseqs_fp=self.reference_seqs_fp)\r\n self.assertEqual(actual, expected_90)", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def test_function_values(self):\n\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_foo\",\n self.pick_byte2,\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=1,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_bar\",\n lambda x : bool(x&2), # bit map example\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=False,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n \n # check defaults just to be safe\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 1)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n result = self.param_dict.update(1005) # just change first in list\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 3)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # fn_bar does not get updated here\n result = self.param_dict.update_many(1205)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(len(result), 1)\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 4)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # both are updated now\n result = self.param_dict.update_many(6)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(result['fn_bar'], True)\n self.assertEqual(len(result), 2)\n \n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 0)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, True)", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True", "def mocked_config_file_custom_provider():\n data = \"\"\"[YESSSSMS]\nLOGIN = 06501234567\nPASSWD = MySecre3tPassw0rd\nDEFAULT_TO = 
+43664123123123\n# MVNO = FANTASYMOBILE\n[YESSSSMS_PROVIDER_URLS]\nLOGIN_URL = mock://kontomanager.at/index.php\nLOGOUT_URL = mock://kontomanager.at/index.php?dologout=2\nKONTOMANAGER_URL = mock://kontomanager.at/kundendaten.php\nWEBSMS_FORM_URL = mock://kontomanager.at/websms.php\nSEND_SMS_URL = mock://kontomanager.at/websms_send.php\n\"\"\"\n with mock.patch(\n \"configparser.open\",\n # \"builtins.open\",\n mock.mock_open(read_data=data),\n ):\n yield", "def test_apply_startup_params(self):\n self.assertRaises(NotImplementedException,\n self.driver.apply_startup_params)", "def test_map_view_context_fields_values(\n self,\n mock_installation_statistics_model_overall_counts,\n mock_installation_statistics_model_data_per_period,\n mock_installation_statistics_model_timeline,\n mock_get_data_created_datetime_scope\n ): # pylint: disable=too-many-locals\n mock_timeline = ['2017-05-14', '2017-05-15', '2017-05-16']\n mock_students, mock_courses, mock_instances = [4124, 5122, 6412], [110, 211, 167], [30, 20, 25]\n mock_instances_count, mock_courses_count, mock_students_count, mock_certificates_count = 6412, 167, 25, 0\n mock_first_datetime_of_update_data = datetime(2017, 6, 1, 14, 56, 18)\n mock_last_datetime_of_update_data = datetime(2017, 7, 2, 23, 12, 8)\n\n mock_installation_statistics_model_timeline.return_value = mock_timeline\n\n mock_installation_statistics_model_data_per_period.return_value = mock_students, mock_courses, mock_instances\n\n mock_installation_statistics_model_overall_counts.return_value = {\n \"instances_count\": 6412,\n \"courses_count\": 167,\n \"students_count\": 25,\n \"generated_certificates_count\": 0,\n }\n\n mock_get_data_created_datetime_scope.return_value = \\\n mock_first_datetime_of_update_data, mock_last_datetime_of_update_data\n\n response = self.client.get('/')\n\n self.assertEqual(json.loads(response.context['timeline']), mock_timeline)\n self.assertEqual(json.loads(response.context['students']), mock_students)\n self.assertEqual(json.loads(response.context['courses']), mock_courses)\n self.assertEqual(json.loads(response.context['instances']), mock_instances)\n self.assertEqual(response.context['instances_count'], mock_instances_count)\n self.assertEqual(response.context['students_count'], mock_students_count)\n self.assertEqual(response.context['courses_count'], mock_courses_count)\n self.assertEqual(response.context['generated_certificates_count'], mock_certificates_count)\n self.assertEqual(response.context['first_datetime_of_update_data'], mock_first_datetime_of_update_data)\n self.assertEqual(response.context['last_datetime_of_update_data'], mock_last_datetime_of_update_data)", "def test_settings_proxy_properties_setting(parameters: Dict[str, Any]) -> None:\n settings = Settings()\n settings_proxy = settings.create_proxy()\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))\n\n for key, value in parameters.items():\n settings.__setattr__(key, value)\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))", "def test_config_device_init_with_defaults(get_config, monkeypatch):\n notbase_config = {'not_presented': 1}\n monkeypatch.setattr(DeviceConfig, 
'minimal_essential_conf', notbase_config)\n cfg = get_config(DeviceConfig, base_config)\n\n assert isinstance(cfg, DeviceConfig), 'wrong class'\n assert cfg.data == notbase_config, 'bad config loaded'", "def setUp(self):\n super().setUp()\n\n self.provider = Provider.objects.filter(type=Provider.PROVIDER_OCP).first()\n\n ocp_metric = metric_constants.OCP_METRIC_CPU_CORE_USAGE_HOUR\n ocp_source_type = Provider.PROVIDER_OCP\n tiered_rates = [{\"unit\": \"USD\", \"value\": 0.22}]\n self.ocp_data = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": ocp_source_type,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}, \"tiered_rates\": tiered_rates}],\n \"currency\": \"USD\",\n }\n self.basic_model = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"markup\": {\"value\": 10, \"unit\": \"percent\"},\n \"rates\": [{\"metric\": {\"name\": ocp_metric}}],\n \"currency\": \"USD\",\n }", "async def test_basic_setup(\n hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: ProtectCamera\n):\n\n camera_high_only = mock_camera.copy(deep=True)\n camera_high_only._api = mock_entry.api\n camera_high_only.channels[0]._api = mock_entry.api\n camera_high_only.channels[1]._api = mock_entry.api\n camera_high_only.channels[2]._api = mock_entry.api\n camera_high_only.name = \"Test Camera 1\"\n camera_high_only.id = \"test_high\"\n camera_high_only.channels[0].is_rtsp_enabled = True\n camera_high_only.channels[0].name = \"High\"\n camera_high_only.channels[0].rtsp_alias = \"test_high_alias\"\n camera_high_only.channels[1].is_rtsp_enabled = False\n camera_high_only.channels[2].is_rtsp_enabled = False\n\n camera_medium_only = mock_camera.copy(deep=True)\n camera_medium_only._api = mock_entry.api\n camera_medium_only.channels[0]._api = mock_entry.api\n camera_medium_only.channels[1]._api = mock_entry.api\n camera_medium_only.channels[2]._api = mock_entry.api\n camera_medium_only.name = \"Test Camera 2\"\n camera_medium_only.id = \"test_medium\"\n camera_medium_only.channels[0].is_rtsp_enabled = False\n camera_medium_only.channels[1].is_rtsp_enabled = True\n camera_medium_only.channels[1].name = \"Medium\"\n camera_medium_only.channels[1].rtsp_alias = \"test_medium_alias\"\n camera_medium_only.channels[2].is_rtsp_enabled = False\n\n camera_all_channels = mock_camera.copy(deep=True)\n camera_all_channels._api = mock_entry.api\n camera_all_channels.channels[0]._api = mock_entry.api\n camera_all_channels.channels[1]._api = mock_entry.api\n camera_all_channels.channels[2]._api = mock_entry.api\n camera_all_channels.name = \"Test Camera 3\"\n camera_all_channels.id = \"test_all\"\n camera_all_channels.channels[0].is_rtsp_enabled = True\n camera_all_channels.channels[0].name = \"High\"\n camera_all_channels.channels[0].rtsp_alias = \"test_high_alias\"\n camera_all_channels.channels[1].is_rtsp_enabled = True\n camera_all_channels.channels[1].name = \"Medium\"\n camera_all_channels.channels[1].rtsp_alias = \"test_medium_alias\"\n camera_all_channels.channels[2].is_rtsp_enabled = True\n camera_all_channels.channels[2].name = \"Low\"\n camera_all_channels.channels[2].rtsp_alias = \"test_low_alias\"\n\n camera_no_channels = mock_camera.copy(deep=True)\n camera_no_channels._api = mock_entry.api\n 
camera_no_channels.channels[0]._api = mock_entry.api\n camera_no_channels.channels[1]._api = mock_entry.api\n camera_no_channels.channels[2]._api = mock_entry.api\n camera_no_channels.name = \"Test Camera 4\"\n camera_no_channels.id = \"test_none\"\n camera_no_channels.channels[0].is_rtsp_enabled = False\n camera_no_channels.channels[0].name = \"High\"\n camera_no_channels.channels[1].is_rtsp_enabled = False\n camera_no_channels.channels[2].is_rtsp_enabled = False\n\n camera_package = mock_camera.copy(deep=True)\n camera_package._api = mock_entry.api\n camera_package.channels[0]._api = mock_entry.api\n camera_package.channels[1]._api = mock_entry.api\n camera_package.channels[2]._api = mock_entry.api\n camera_package.name = \"Test Camera 5\"\n camera_package.id = \"test_package\"\n camera_package.channels[0].is_rtsp_enabled = True\n camera_package.channels[0].name = \"High\"\n camera_package.channels[0].rtsp_alias = \"test_high_alias\"\n camera_package.channels[1].is_rtsp_enabled = False\n camera_package.channels[2].is_rtsp_enabled = False\n package_channel = camera_package.channels[0].copy(deep=True)\n package_channel.is_rtsp_enabled = False\n package_channel.name = \"Package Camera\"\n package_channel.id = 3\n package_channel.fps = 2\n package_channel.rtsp_alias = \"test_package_alias\"\n camera_package.channels.append(package_channel)\n\n mock_entry.api.bootstrap.cameras = {\n camera_high_only.id: camera_high_only,\n camera_medium_only.id: camera_medium_only,\n camera_all_channels.id: camera_all_channels,\n camera_no_channels.id: camera_no_channels,\n camera_package.id: camera_package,\n }\n await hass.config_entries.async_setup(mock_entry.entry.entry_id)\n await hass.async_block_till_done()\n\n assert_entity_counts(hass, Platform.CAMERA, 14, 6)\n\n # test camera 1\n entity_id = validate_default_camera_entity(hass, camera_high_only, 0)\n await validate_rtsps_camera_state(hass, camera_high_only, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_high_only, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_high_only, 0, entity_id)\n\n # test camera 2\n entity_id = validate_default_camera_entity(hass, camera_medium_only, 1)\n await validate_rtsps_camera_state(hass, camera_medium_only, 1, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_medium_only, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_medium_only, 1, entity_id)\n\n # test camera 3\n entity_id = validate_default_camera_entity(hass, camera_all_channels, 0)\n await validate_rtsps_camera_state(hass, camera_all_channels, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 0, entity_id)\n\n entity_id = validate_rtsps_camera_entity(hass, camera_all_channels, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsps_camera_state(hass, camera_all_channels, 1, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 1)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 1, entity_id)\n\n entity_id = validate_rtsps_camera_entity(hass, camera_all_channels, 2)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsps_camera_state(hass, 
camera_all_channels, 2, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_all_channels, 2)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_all_channels, 2, entity_id)\n\n # test camera 4\n entity_id = validate_default_camera_entity(hass, camera_no_channels, 0)\n await validate_no_stream_camera_state(\n hass, camera_no_channels, 0, entity_id, features=0\n )\n\n # test camera 5\n entity_id = validate_default_camera_entity(hass, camera_package, 0)\n await validate_rtsps_camera_state(hass, camera_package, 0, entity_id)\n\n entity_id = validate_rtsp_camera_entity(hass, camera_package, 0)\n await enable_entity(hass, mock_entry.entry.entry_id, entity_id)\n await validate_rtsp_camera_state(hass, camera_package, 0, entity_id)\n\n entity_id = validate_default_camera_entity(hass, camera_package, 3)\n await validate_no_stream_camera_state(\n hass, camera_package, 3, entity_id, features=0\n )", "def default_load_all(cls, values): # noqa: N805\n if not any(values.values()):\n values[\"eia860\"] = Eia860Settings()\n values[\"eia861\"] = Eia861Settings()\n values[\"eia923\"] = Eia923Settings()\n\n return values", "async def test_form_multiple_services(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"errors\"] is None\n\n with patch(\"aussiebb.asyncio.AussieBB.__init__\", return_value=None), patch(\n \"aussiebb.asyncio.AussieBB.login\", return_value=True\n ), patch(\"aussiebb.asyncio.AussieBB.get_services\", return_value=FAKE_SERVICES):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n FAKE_DATA,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == RESULT_TYPE_FORM\n assert result2[\"step_id\"] == \"service\"\n assert result2[\"errors\"] is None\n\n with patch(\n \"homeassistant.components.aussie_broadband.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]]},\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result3[\"title\"] == TEST_USERNAME\n assert result3[\"data\"] == FAKE_DATA\n assert result3[\"options\"] == {\n CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]],\n }\n assert len(mock_setup_entry.mock_calls) == 1", "def requires_mapping(self):", "def test_set_dict_value_3(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"C\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"C\")", "def test_real_value(self):\n setting_model = Setting(python_type='list', dry_value='')\n self.assertEqual(setting_model.value, [])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Lancelot,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Lancelot', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Patsy'])\n\n setting_model = Setting(python_type='list',\n dry_value='Arthur,Robin,Patsy',\n default_value='Arthur,Patsy')\n self.assertEqual(setting_model.value,\n ['Arthur', 'Robin', 'Patsy'])", "def used_implementation(self, impl: str, value):", "def test_input_identifier_types(self):\n # It's okay to set INPUT_IDENTIFIER_TYPES to None it 
means you\n # will cover any and all identifier types.\n class Base(IdentifierCoverageProvider):\n SERVICE_NAME = \"Test provider\"\n DATA_SOURCE_NAME = DataSource.GUTENBERG\n\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = None\n provider = MockProvider(self._db)\n assert None == provider.input_identifier_types\n\n # It's okay to set a single value.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = Identifier.ISBN\n provider = MockProvider(self._db)\n assert [Identifier.ISBN] == provider.input_identifier_types\n\n # It's okay to set a list of values.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = [Identifier.ISBN, Identifier.OVERDRIVE_ID]\n provider = MockProvider(self._db)\n assert ([Identifier.ISBN, Identifier.OVERDRIVE_ID] ==\n provider.input_identifier_types)\n\n # It's not okay to do nothing.\n class MockProvider(Base):\n pass\n with pytest.raises(ValueError) as excinfo:\n MockProvider(self._db)\n assert \"MockProvider must define INPUT_IDENTIFIER_TYPES, even if the value is None.\" in str(excinfo.value)", "def _verify_map(self):\n # ---- verify self.info ----\n if self.info['contype'] == NotImplemented:\n # 'contype' must be defined\n errstr = \"self.info['contype'] must be defined as:\\n\" \\\n \" 'motion', 'waveform', or 'power'\"\n raise NotImplementedError(errstr)\n elif self.info['contype'] not in ['motion',\n 'waveform',\n 'power']:\n # 'contype' must be one of specified type\n errstr = \"self.info['contype'] must be defined as:\\n\" \\\n \" 'motion', 'waveform', or 'power'\"\n raise NotImplementedError(errstr)\n\n # ---- verity self.configs ----\n # 'dataset fields' must be defined\n if 'dataset fields' not in self.configs:\n errstr = \"self.configs['dataset fields'] must be defined \" \\\n \"as:\\n [('field name', dtype), ]\"\n raise NotImplementedError(errstr)\n elif self.configs['dataset fields'] == NotImplemented:\n errstr = \"self.configs['dataset fields'] must be defined \" \\\n \"as:\\n [('field name', dtype), ]\"\n raise NotImplementedError(errstr)\n\n # 'dset field to numpy field' must be defined\n # - each 'dataset field' needs a mapping to a structured numpy\n # field for hdfReadControl\n # - 'dset field to numpy field' is a list of 3-element tuples\n # where each entry in the list corresponds to a dataset field\n # name\n # - the 3-element tuple must follow the format:\n #\n # self.configs['dset field to numpy field'][i] = (\n # str, # dataset field name\n # str, # corresponding structured numpy field name\n # int) # index of structured numpy field\n #\n # For example, the '6K Compumotor would look like...\n # self.configs['dset field to numpy'] = [\n # ('x', 'xyz', 0),\n # ('y', 'xyz', 1),\n # ('z', 'xyz', 2)]\n #\n key = 'dset field to numpy field'\n if key not in self.configs:\n raise NotImplementedError\n elif self.configs[key] == NotImplemented:\n raise NotImplementedError\n elif type(self.configs[key]) is not list:\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n elif not all(isinstance(val, tuple)\n for val in self.configs[key]):\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n elif not all(len(val) == 3 for val in self.configs[key]):\n errstr = \"self.configs['dset field to numpy field] must \" \\\n + \"be a list of 3-element tuples\"\n raise Exception(errstr)\n else:\n err = False\n dset_fields = [name\n for name, dftype\n in self.configs['dataset fields']]\n for dfname, 
npfname, npi in self.configs[key]:\n if dfname not in dset_fields:\n err = True\n break\n elif type(npfname) is not str:\n err = True\n break\n elif type(npi) is not int:\n err = True\n break\n if err:\n errstr = \"self.configs['dset field to numpy field] \" \\\n + \"must be a list of 3-element tuples\"\n raise Exception(errstr)\n\n # contype == 'motion' specific verification\n if self.contype == 'motion':\n # verify 'motion list'\n if 'motion list' not in self.configs:\n # 'motion list' exists\n errstr = \"self.configs['motion list'] must be defined\"\n raise NotImplementedError(errstr)\n elif self.configs['motion list'] == NotImplemented:\n # 'motion list' is defined\n errstr = \"self.configs['motion list'] must be defined\"\n raise NotImplementedError(errstr)\n else:\n # each 'motion list' must have its own config\n for name in self.configs['motion list']:\n if name not in self.configs:\n errstr = \"must defined self.configs['motion \" \\\n \"name'] for each motion list in \" \\\n \"self.configs['motion list'] = \" \\\n \"[motion name, ]\"\n raise NotImplementedError(errstr)\n\n # verify 'probe list'\n if 'probe list' not in self.configs:\n # 'probe list' exists\n errstr = \"self.configs['probe list'] must be defined\"\n raise NotImplementedError(errstr)\n elif self.configs['probe list'] == NotImplemented:\n # 'probe list' is defined\n errstr = \"self.configs['probe list'] must be defined\"\n raise NotImplementedError(errstr)\n else:\n # each 'probe list' must have its own config\n for name in self.configs['probe list']:\n if name not in self.configs:\n errstr = \"must defined self.configs['probe \" \\\n \"name'] for each probe in \" \\\n \"self.configs['probe list'] = \" \\\n \"[probe name, ]\"\n raise NotImplementedError(errstr)\n\n # delete 'config names' if present\n if 'config names' in self.configs:\n del self.configs['config names']\n\n # verify all other contypes\n if self.contype != 'motion':\n # remove 'motion list'\n if 'motion list' in self.configs:\n # remove 'motion list' children\n for name in self.configs['motion list']:\n if name in self.configs:\n del(self.configs[name])\n\n # remove 'motion list'\n del(self.configs['motion list'])\n\n # remove 'probe list'\n if 'probe list' in self.configs:\n # remove 'probe list' children\n for name in self.configs['probe list']:\n if name in self.configs:\n del (self.configs[name])\n\n # remove 'motion list'\n del (self.configs['probe list'])\n\n # verify 'command list'\n # if 'command list' not in self.configs:\n # # 'command list' exists\n # errstr = \"self.configs['command list'] must be \" \\\n # \"defined\"\n # raise NotImplementedError(errstr)\n # elif self.configs['command list'] == NotImplemented:\n # # 'motion list' is defined\n # errstr = \"self.configs['command list'] must be \" \\\n # \"defined\"\n # raise NotImplementedError(errstr)", "def test_prep_overrides(self):\n original_data = self.form.data\n test_data = original_data.copy()\n test_data._mutable = False\n self.form.data = test_data # copied only to allow tear-down reverting to original.\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n self.form.fields = test_fields # copied to allow tear-down reverting to original.\n original_get_overrides = self.form.get_overrides\n def replace_overrides(): return self.formfield_attrs_overrides\n self.form.get_overrides = replace_overrides\n original_alt_field_info = getattr(self.form, 'alt_field_info', None)\n self.form.alt_field_info = {}\n overrides = self.formfield_attrs_overrides.copy()\n 
DEFAULT = overrides.pop('_default_')\n expected_attrs = {}\n for name, field in test_fields.items():\n attrs = field.widget.attrs.copy()\n if isinstance(field.widget, (RadioSelect, CheckboxSelectMultiple, CheckboxInput, )):\n pass # update if similar section in prep_fields is updated.\n attrs.update(overrides.get(name, {}))\n # TODO: setup structure for using default or defined version for all CharFields.\n no_resize = overrides.get(name, {}).pop('no_size_override', False)\n no_resize = True if isinstance(field.widget, (HiddenInput, MultipleHiddenInput)) else no_resize\n if no_resize:\n expected_attrs[name] = attrs\n continue # None of the following size overrides are applied for this field.\n if isinstance(field.widget, Textarea):\n width_attr_name = 'cols'\n default = DEFAULT.get('cols', None)\n display_size = attrs.get('cols', None)\n if 'rows' in DEFAULT:\n height = attrs.get('rows', None)\n height = min((DEFAULT['rows'], int(height))) if height else DEFAULT['rows']\n attrs['rows'] = str(height)\n if default: # For textarea, we always override. The others depend on different conditions.\n display_size = display_size or default\n display_size = min((int(display_size), int(default)))\n elif issubclass(field.__class__, CharField):\n width_attr_name = 'size' # 'size' is only valid for input types: email, password, tel, text\n default = DEFAULT.get('size', None) # Cannot use float(\"inf\") as an int.\n display_size = attrs.get('size', None)\n else: # This field does not have a size setting.\n width_attr_name, default, display_size = None, None, None\n input_size = attrs.get('maxlength', None)\n possible_size = [int(ea) for ea in (display_size or default, input_size) if ea]\n # attrs['size'] = str(int(min(float(display_size), float(input_size)))) # Can't use float(\"inf\") as an int.\n if possible_size and width_attr_name:\n attrs[width_attr_name] = str(min(possible_size))\n expected_attrs[name] = attrs\n # Expected:\n # formfield_attrs_overrides = {\n # '_default_': {'size': 15, 'cols': 20, 'rows': 4, },\n # 'first': {'maxlength': 191, 'size': 20, },\n # 'second': {'maxlength': 2, }, # 'size': 2,\n # 'last': {'maxlength': 2, 'size': 5, },\n # }\n result_fields = self.form.prep_fields()\n result_attrs = {name: field.widget.attrs.copy() for name, field in result_fields.items()}\n first_maxlength = expected_attrs['first']['maxlength'] # overrides['first']['maxlength']\n first_size = expected_attrs['first']['size'] # overrides['first']['size']\n second_maxlength = expected_attrs['second']['maxlength'] # overrides['second']['maxlength']\n last_maxlength = expected_attrs['last']['maxlength'] # overrides['last']['maxlength']\n last_size = expected_attrs['last']['size'] # overrides['last']['size']\n\n self.assertEqual(first_maxlength, result_fields['first'].widget.attrs.get('maxlength', None))\n self.assertEqual(first_size, result_fields['first'].widget.attrs.get('size', None))\n self.assertEqual(second_maxlength, result_fields['second'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_maxlength, result_fields['last'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_size, result_fields['last'].widget.attrs.get('size', None))\n for key, val in expected_attrs.items():\n self.assertEqual(val, result_attrs[key])\n self.assertDictEqual(expected_attrs, result_attrs)\n\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n 
self.form.get_overrides = original_get_overrides", "def test__authenticate_with_custom_port(self):\n get_cloud_config_mock = [\n \"proxmox.connection.url\",\n \"9999\",\n \"fakeuser\",\n \"secretpassword\",\n True,\n ]\n requests_post_mock = MagicMock()\n with patch(\n \"salt.config.get_cloud_config_value\",\n autospec=True,\n side_effect=get_cloud_config_mock,\n ), patch(\"requests.post\", requests_post_mock):\n proxmox._authenticate()\n requests_post_mock.assert_called_with(\n \"https://proxmox.connection.url:9999/api2/json/access/ticket\",\n verify=True,\n data={\"username\": (\"fakeuser\",), \"password\": \"secretpassword\"},\n )", "def test_function_callparams(self):\n\n @Configurable(conf=category('', Parameter('test', value=True)))\n def twist(test=None):\n return test\n\n value = twist()\n\n self.assertTrue(value)\n\n value = twist(False)\n\n self.assertFalse(value)\n\n Configurable.get_annotations(twist)[0].conf['']['test'].value = 1\n\n value = twist()\n\n self.assertEqual(value, 1)", "def test_bad_ext_app_setting(self, mock_settings):\n lang, _ = Language.objects.get_or_create(name=\"Test Language\", code=\"text-x-lang\")\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": None})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": {}})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": ()})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": \" \"})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)\n\n mock_settings.configure_mock(**{\"EXT_APP_PUSH\": 1.0})\n\n status = notify_external_apps(instance=lang, action=\"UPDATE\")\n self.assertFalse(status.success)\n self.assertGreater(len(status.message), 0)", "def default_load_all(cls, values): # noqa: N805\n if not any(values.values()):\n values[\"eia\"] = EiaSettings()\n values[\"epacems\"] = EpaCemsSettings()\n values[\"ferc1\"] = Ferc1Settings()\n values[\"ferc714\"] = Ferc714Settings()\n values[\"glue\"] = GlueSettings()\n\n return values", "async def test_plenticore_async_setup_g1(\n hass: HomeAssistant,\n mock_config_entry: MockConfigEntry,\n mock_apiclient: ApiClient,\n) -> None:\n mock_apiclient.get_settings = AsyncMock(\n return_value={\"scb:network\": [SettingsData({\"id\": \"Hostname\"})]}\n )\n mock_apiclient.get_setting_values = AsyncMock(\n # G1 model has the entry id \"Hostname\"\n return_value={\n \"devices:local\": {\n \"Properties:SerialNo\": \"12345\",\n \"Branding:ProductName1\": \"PLENTICORE\",\n \"Branding:ProductName2\": \"plus 10\",\n \"Properties:VersionIOC\": \"01.45\",\n \"Properties:VersionMC\": \"01.46\",\n },\n \"scb:network\": {\"Hostname\": \"scb\"},\n }\n )\n\n mock_config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n plenticore = 
hass.data[DOMAIN][mock_config_entry.entry_id]\n\n assert plenticore.device_info == DeviceInfo(\n configuration_url=\"http://192.168.1.2\",\n identifiers={(DOMAIN, \"12345\")},\n manufacturer=\"Kostal\",\n model=\"PLENTICORE plus 10\",\n name=\"scb\",\n sw_version=\"IOC: 01.45 MC: 01.46\",\n )", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {0: ['cdhit_test_seqs_0'],\r\n 1: ['cdhit_test_seqs_1'],\r\n 2: ['cdhit_test_seqs_2'],\r\n 3: ['cdhit_test_seqs_3'],\r\n 4: ['cdhit_test_seqs_4'],\r\n 5: ['cdhit_test_seqs_5'],\r\n 6: ['cdhit_test_seqs_6'],\r\n 7: ['cdhit_test_seqs_7'],\r\n 8: ['cdhit_test_seqs_8'],\r\n 9: ['cdhit_test_seqs_9']}\r\n\r\n app = CdHitOtuPicker(params={})\r\n obs = app(self.tmp_seq_filepath1)\r\n self.assertEqual(obs, exp)", "def test_call_default_params(self):\r\n\r\n exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),\r\n 'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),\r\n '2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),\r\n 'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),\r\n }\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath,\r\n self.tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)", "def test_get_user_settings(client, jwt, session, keycloak_mock, monkeypatch): # pylint:disable=unused-argument\n user_model = factory_user_model(user_info=TestUserInfo.user_test)\n contact = factory_contact_model()\n contact_link = ContactLinkModel()\n contact_link.contact = contact\n contact_link.user = user_model\n contact_link.commit()\n kc_id = user_model.keycloak_guid\n\n claims = copy.deepcopy(TestJwtClaims.updated_test.value)\n claims['sub'] = str(kc_id)\n claims['idp_userid'] = str(user_model.idp_userid)\n patch_token_info(claims, monkeypatch)\n\n OrgService.create_org(TestOrgInfo.org_branch_name, user_id=user_model.id)\n\n # post token with updated claims\n headers = factory_auth_header(jwt=jwt, claims=claims)\n rv = client.get(f'/api/v1/users/{kc_id}/settings', headers=headers, content_type='application/json')\n item_list = rv.json\n account = next(obj for obj in item_list if obj['type'] == 'ACCOUNT')\n assert account['accountType'] == 'BASIC'\n assert account['additionalLabel'] == TestOrgInfo.org_branch_name.get('branchName')\n assert rv.status_code == http_status.HTTP_200_OK\n assert schema_utils.validate(item_list, 'user_settings_response')[0]\n assert account['productSettings'] == f'/account/{account[\"id\"]}/restricted-product'\n\n kc_id_no_user = TestUserInfo.user1.get('keycloak_guid')\n claims = copy.deepcopy(TestJwtClaims.updated_test.value)\n claims['sub'] = str(kc_id_no_user)\n patch_token_info(claims, monkeypatch)\n # post token with updated claims\n headers = factory_auth_header(jwt=jwt, claims=claims)\n rv = client.get(f'/api/v1/users/{kc_id_no_user}/settings', headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_200_OK\n assert schema_utils.validate(item_list, 'user_settings_response')[0]\n item_list = rv.json\n account = next((obj for obj in item_list if obj['type'] == 'ACCOUNT'), None)\n assert account is None\n user_profile = next(obj for obj in item_list if obj['type'] == 'USER_PROFILE')\n assert '/userprofile' in user_profile.get('urlpath')", "async def test_setup_multiple(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n bootstrap: Bootstrap,\n) -> None:\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n\n assert 
ufp.entry.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert ufp.entry.unique_id == ufp.api.bootstrap.nvr.mac\n\n nvr = bootstrap.nvr\n nvr._api = ufp.api\n nvr.mac = \"A1E00C826983\"\n nvr.id\n ufp.api.get_nvr = AsyncMock(return_value=nvr)\n\n with patch(\n \"homeassistant.components.unifiprotect.utils.ProtectApiClient\"\n ) as mock_api:\n mock_config = MockConfigEntry(\n domain=DOMAIN,\n data={\n \"host\": \"1.1.1.1\",\n \"username\": \"test-username\",\n \"password\": \"test-password\",\n \"id\": \"UnifiProtect\",\n \"port\": 443,\n \"verify_ssl\": False,\n },\n version=2,\n )\n mock_config.add_to_hass(hass)\n\n mock_api.return_value = ufp.api\n\n await hass.config_entries.async_setup(mock_config.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert mock_config.unique_id == ufp.api.bootstrap.nvr.mac", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'corpus_subset,'\n strategy1.probability = 1\n strategy1.engine = 'afl'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def test_entities__FieldCustomization__default_value__1(address_book):\n fc = IFieldCustomization(address_book)\n assert u'Time zone' == fc.default_value(IAddressBook['time_zone'], 'label')", "def test_sanity(self, mock_provider):\n p = mock_provider()\n assert p.metadata == {'base_url': 'https://api.mock.com',\n 'provider_name': 'mock_provider',\n 'site_url': 'https://www.mock.com'}\n assert p.arguments == {\n 'not_required': {\n 'oneOf': [\n {'items': {'type': 'string'}, 'minItems': 1, 'type': 'array', 'uniqueItems': True},\n {'type': 'string'}\n ]\n },\n 'required': {'type': 'string'},\n 'message': {'type': 'string'},\n 'option_with_default': {'type': 'string'}\n }\n\n assert p.required == ['required']\n rsp = p.notify(**self.valid_data)\n assert isinstance(rsp, Response)\n assert not rsp.errors\n assert rsp.raise_on_errors() is None\n assert repr(rsp) == '<Response,provider=Mock_provider,status=success>'\n assert repr(p) == '<Provider:[Mock_provider]>'", "def test_get_setting(monkeypatch):\n resp = str(uuid.uuid4())\n arg = str(uuid.uuid4())\n kwarg = str(uuid.uuid4())\n get_secret = Mock(return_value=resp)\n monkeypatch.setattr(\"lambdautils.state.get_secret\", get_secret)\n resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)\n assert resp2 == resp\n get_secret.assert_called_with(arg, kwarg=kwarg)", "def _validate_provider(self, name_or_uuid, **kwargs):\n found = self.client._provider_tree.data(name_or_uuid)\n # If kwargs provided, their names indicate ProviderData attributes\n for attr, expected in kwargs.items():\n try:\n self.assertEqual(getattr(found, attr), expected)\n except AttributeError:\n self.fail(\"Provider with name or UUID %s doesn't have \"\n \"attribute %s (expected value: %s)\" %\n (name_or_uuid, attr, expected))", "def test_set(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n self.assertIsNone(\n ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )\n\n setting_name = 'user_str_setting'\n 
url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 1)\n obj = AppSetting.objects.get(name=setting_name, user=self.user)\n self.assertEqual(obj.get_value(), 'value')\n self.assertIsNone(\n ProjectEvent.objects.filter(\n event_name='app_setting_set_api'\n ).first()\n )", "def test_set_no_value(self):\n self.assertEqual(AppSetting.objects.count(), 0)\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)", "def valid_config(hass: HomeAssistant, requests_mock):\n requests_mock.get(\n \"https://api.vultr.com/v1/account/info?api_key=ABCDEFG1234567\",\n text=load_fixture(\"account_info.json\", \"vultr\"),\n )\n\n with patch(\n \"vultr.Vultr.server_list\",\n return_value=json.loads(load_fixture(\"server_list.json\", \"vultr\")),\n ):\n # Setup hub\n vultr.setup(hass, VALID_CONFIG)", "def test_patch_hyperflex_ucsm_config_policy(self):\n pass", "def test_claims_supported_not_set(self):\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], [])", "def test_bad_custom_params(self):\r\n bad_custom_params = ['test_custom_params: test_custom_param_value']\r\n self.xmodule.custom_parameters = bad_custom_params\r\n self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))\r\n self.xmodule.oauth_params = Mock()\r\n with self.assertRaises(LTIError):\r\n self.xmodule.get_input_fields()", "def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }", "def test_default_customer_app_map_search(self):\n pass", "def test_inputs_suitor_prefs(resident_names, hospital_names, capacities, seed):\n\n _, _, match = _make_match(resident_names, hospital_names, capacities, seed)\n\n assert match._check_suitor_prefs()\n\n match.suitors[0].pref_names = [1, 2, 3]\n\n with pytest.raises(Exception):\n match._check_suitor_prefs()", "def test__VerificationFieldPlatform__value():\n for instance in VerificationFieldPlatform.INSTANCES.values():\n vampytest.assert_instance(instance.value, VerificationFieldPlatform.VALUE_TYPE)" ]
[ "0.7055314", "0.6939393", "0.63423544", "0.576975", "0.5697624", "0.5679179", "0.55829513", "0.5382714", "0.5345334", "0.5325174", "0.53106457", "0.5242109", "0.52059597", "0.5203102", "0.5186651", "0.5169315", "0.51328266", "0.51315385", "0.5126731", "0.5115003", "0.50994205", "0.5093414", "0.50820154", "0.5074523", "0.5061159", "0.50434846", "0.5037727", "0.50352234", "0.50327957", "0.5018553", "0.50175124", "0.50126547", "0.5012161", "0.50014883", "0.5001277", "0.4996847", "0.49957845", "0.4989725", "0.49859405", "0.4979983", "0.49636325", "0.49628103", "0.49526268", "0.49485505", "0.49423316", "0.49180043", "0.49076396", "0.49003863", "0.4899193", "0.48928812", "0.48889008", "0.48886302", "0.48882446", "0.4886075", "0.48834777", "0.48825428", "0.4880589", "0.4880027", "0.48777232", "0.48777232", "0.48669815", "0.4863833", "0.48586407", "0.48578447", "0.48539978", "0.48506185", "0.4849006", "0.48395336", "0.4838721", "0.48384526", "0.4837042", "0.48263034", "0.48259896", "0.48249537", "0.48199266", "0.48183042", "0.48163012", "0.48143882", "0.4813749", "0.4812405", "0.4809102", "0.48063162", "0.4802421", "0.48022398", "0.48009628", "0.4797333", "0.4797317", "0.4795819", "0.47936177", "0.47829607", "0.4781316", "0.47804084", "0.47801426", "0.47722742", "0.47705948", "0.4768243", "0.4761191", "0.4757908", "0.47506016", "0.47503716" ]
0.66155446
2
Ensure that if there's an HTTP failure while fetching metadata, we continue, using the metadata from the SAML assertion.
def test_register_http_failure(self): self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps({ 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', }) ) # Because we're getting details from the assertion, fall back to the initial set of details. self.USER_EMAIL = "[email protected]" self.USER_NAME = "Me Myself And I" self.USER_USERNAME = "myself" self._test_register()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_metadata_saml_not_authorized():\n\n responses.add(\n responses.GET,\n f\"{SERVICE_URL}/$metadata\",\n content_type='text/html; charset=utf-8',\n status=200)\n\n with pytest.raises(HttpError) as e_info:\n pyodata.Client(SERVICE_URL, requests)\n\n assert str(e_info.value).startswith('Metadata request did not return XML, MIME type:')", "def metadata():\n req = prepare_onelogin_request(request)\n saml_auth = init_saml_auth(req)\n settings = saml_auth.get_settings()\n metadata = settings.get_sp_metadata()\n errors = settings.validate_metadata(metadata)\n\n if len(errors) == 0:\n resp = make_response(metadata, 200)\n resp.headers[\"Content-Type\"] = \"text/xml\"\n else:\n resp = make_response(\", \".join(errors), 500)\n return resp", "def test_metadata_not_reachable():\n\n responses.add(\n responses.GET,\n f\"{SERVICE_URL}/$metadata\",\n content_type='text/html',\n status=404)\n\n with pytest.raises(HttpError) as e_info:\n pyodata.Client(SERVICE_URL, requests)\n\n assert str(e_info.value).startswith('Metadata request failed')", "def metadata():\n req = prepare_flask_request(request)\n auth = init_saml_auth(req)\n settings = auth.get_settings()\n sp_metadata = settings.get_sp_metadata()\n errors = settings.validate_metadata(sp_metadata)\n\n if len(errors) == 0:\n resp = make_response(sp_metadata, 200)\n resp.headers[\"Content-Type\"] = \"text/xml\"\n else:\n resp = make_response(\", \".join(errors), 500)\n return resp", "def sso_saml_metadata(request, idp_slug):\n saml_settings = OneLogin_Saml2_Settings(get_saml2_config(request.idp))\n metadata = saml_settings.get_sp_metadata()\n errors = saml_settings.validate_metadata(metadata)\n\n if len(errors) == 0:\n resp = HttpResponse(content=metadata, content_type='text/xml')\n else:\n resp = HttpResponseServerError(content=', '.join(errors))\n return resp", "def _verify_metadata(self, subject_meta):\n # NOTE: admins can see subject metadata in the v1 API, but shouldn't\n # be able to download the actual subject data.\n if subject_meta['status'] == 'deleted' and subject_meta['deleted']:\n raise exception.NotFound()\n\n if not subject_meta['size']:\n # override subject size metadata with the actual cached\n # file size, see LP Bug #900959\n subject_meta['size'] = self.cache.get_subject_size(subject_meta['id'])", "def test_missing_metadata(self):\n responses.add(responses.POST, self.endpoint, status=201)\n with self.assertRaises(QuarantinableError):\n with self.assertLogs() as cm:\n processor.process(encrypt(test_data['missing_metadata']))\n self.assertIn('Decrypted json missing metadata. Quarantining message', cm.output[0])", "async def test_fetch_dataset_metadata_call_exception(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = ConnectionException()\n with self.assertRaises(aiohttp.web_exceptions.HTTPInternalServerError):\n await fetch_dataset_metadata(pool, None, None)", "def _load_metadata(self, datapath):\n try:\n metadata = Metadata(datapath)\n return metadata\n except RuntimeError:\n print('Metadata does not exist. 
Please double check your datapath.')\n return None", "def testMetadata(self):\n self.assertGreater(len(self.unauth.metadata(self.dataset)), 0)\n self.assertGreater(len(self.auth.metadata(self.dataset)), 0)", "def test_http_get_metadata_non_200_status_code(self, mock_urllib2):\n mock_response = Mock(name=\"Always non-200 Status Code\")\n mock_response.getcode.return_value = 400\n mock_urllib2.return_value = mock_response\n with self.assertRaises(IOError) as exception:\n ef_utils.http_get_metadata(\"ami-id\")\n self.assertIn(\"Non-200 response\", str(exception.exception))", "def load_idp_metadata(self, url=None):\n self._logger.info(\"Started loading IdP XML metadata from {0}\".format(url))\n\n try:\n xml_metadata = OneLogin_Saml2_IdPMetadataParser.get_metadata(url)\n except Exception as exception:\n raise SAMLMetadataLoadingError(inner_exception=exception)\n\n self._logger.info(\"Finished loading IdP XML metadata from {0}\".format(url))\n\n return xml_metadata", "def test_http_get_metadata_200_status_code(self, mock_urllib2):\n mock_response = Mock(name=\"Always 200 Status Code\")\n mock_response.getcode.return_value = 200\n mock_response.read.return_value = \"ami-12345678\"\n mock_urllib2.return_value = mock_response\n response = ef_utils.http_get_metadata(\"ami-id\")\n self.assertEquals(response, \"ami-12345678\")", "def _check_azure_metadata_service() -> None:\n try:\n jsn = requests.get(\n AZURE_METADATA_SERVICE_INSTANCE_URL,\n params={\"api-version\": \"2021-02-01\"},\n headers={\"Metadata\": \"true\"},\n timeout=2,\n ).json()\n if \"compute\" not in jsn or \"azEnvironment\" not in jsn[\"compute\"]:\n raise AirflowException(\n f\"Was able to fetch some metadata, but it doesn't look like Azure Metadata: {jsn}\"\n )\n except (requests_exceptions.RequestException, ValueError) as e:\n raise AirflowException(f\"Can't reach Azure Metadata Service: {e}\")", "def test_metadata(self):\n d = self.contentStore.storeObject(\n 'blah', 'blah', metadata={'blah': 'blah'})\n return self.assertFailure(d, NotImplementedError\n ).addCallback(lambda e: self.assertSubstring('metadata', str(e)))", "def _process_fetch_failure(self):\n logger.info('DataFetcher: No valid result is received')\n if len(self.urls_processed) == len(self.urls):\n raise NoDataReceivedFromCaster()\n for _, error_code, error_text in self._curls_failed:\n if error_code == PYCURL_TIMEOUT_ERRNO:\n raise ExceededTimeoutError(error_text)\n if self._curls_failed:\n _, _, error_text = self._curls_failed[0]\n raise UnableToConnect(error_text)\n raise NoDataReceivedFromCaster()", "def finish_metadata(self, parent, metadata, reply_content):\n metadata['status'] = reply_content['status']\n if reply_content['status'] == 'error':\n if reply_content['ename'] == 'UnmetDependency':\n metadata['dependencies_met'] = False\n metadata['engine_info'] = self.get_engine_info()\n\n return metadata", "def test_validate_metadata_pass(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-passing-batch\",\n \"samples\": [\n \"PTC_EXPn200908LL_L2000001\",\n \"PTC_EXPn200908LL_L2000002\",\n \"PTC_EXPn200908LL_L2000003\"\n ],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNone(reason)\n\n # should not call to slack 
webhook\n verify(libslack.http.client.HTTPSConnection, times=0).request(...)", "def _get_metadata(self, pkg_name):\n pkg_name = urllib.parse.quote(pkg_name, safe='@')\n if self.metadatas.get(pkg_name):\n return self.metadatas.get(pkg_name)\n else:\n url = urllib.parse.urljoin(self.REGISTRY, pkg_name)\n try:\n pkg_metadata = requests.get(url).json()\n self.metadatas[pkg_name] = pkg_metadata\n return pkg_metadata\n except urllib.error.HTTPError as e:\n print('Could not download {} from: {} with error: {}'. format(pkg_name, url, e.msg))\n exit(-1)", "def lazy_check(self):\n if getattr(self, 'lazy_failure', None):\n exc_type, original_exception, traceback_part_text = self.lazy_failure\n msg = original_exception.args[0]\n original_exception = self.LazyAssertionError(\n '\\n'.join((\n '\\nUse the first frame of this real traceback and ignore the previous lazy traceback:',\n traceback_part_text,\n '{}: {}'.format(exc_type.__name__, msg),\n )),\n *original_exception.args[1:]\n )\n raise original_exception", "def fetch_metadata(requests_impl=requests):\n\n print(f'fetching metadata at {Network.METADATA_URL}')\n return requests_impl.get(Network.METADATA_URL).json()", "async def _a_check_azure_metadata_service(self):\n try:\n async with self._session.get(\n url=AZURE_METADATA_SERVICE_INSTANCE_URL,\n params={\"api-version\": \"2021-02-01\"},\n headers={\"Metadata\": \"true\"},\n timeout=2,\n ) as resp:\n jsn = await resp.json()\n if \"compute\" not in jsn or \"azEnvironment\" not in jsn[\"compute\"]:\n raise AirflowException(\n f\"Was able to fetch some metadata, but it doesn't look like Azure Metadata: {jsn}\"\n )\n except (requests_exceptions.RequestException, ValueError) as e:\n raise AirflowException(f\"Can't reach Azure Metadata Service: {e}\")", "def metadata_callback(_request, _uri, headers):\n return (200, headers, self.read_data_file('testshib_metadata.xml')) # lint-amnesty, pylint: disable=no-member", "def test_read_http_error(self, data, requests_mock, capsys):\n requests_mock.get(data_url, status_code=300)\n with pytest.raises(RuntimeError):\n r = operations.read(data_url)\n assert 'HTTP error: 300' in capsys.readouterr().out", "def resolve_failure(self):\n\t\tpass", "def fetch_saml_validation(self, ticket):\n\n headers = {\n 'soapaction': 'http://www.oasis-open.org/committees/security',\n 'cache-control': 'no-cache',\n 'pragma': 'no-cache',\n 'accept': 'text/xml',\n 'connection': 'keep-alive',\n 'content-type': 'text/xml; charset=utf-8',\n }\n params = {'TARGET': self.service_url}\n saml_validate_url = urllib_parse.urljoin(\n self.server_url, 'samlValidate',\n )\n return self.session.post(\n saml_validate_url,\n self.get_saml_assertion(ticket),\n params=params,\n headers=headers)", "def validate_metadata(self):\n metadata = self.get_client_metadata()\n\n return True", "def validate_metadata(self):\n\n # check sampling rate\n if self.has_data():\n # check start time\n if self.start != self.run_metadata.time_period.start:\n if (\n self.run_metadata.time_period.start\n != \"1980-01-01T00:00:00+00:00\"\n ):\n msg = (\n f\"start time of dataset {self.start} does not \"\n f\"match metadata start {self.run_metadata.time_period.start} \"\n f\"updating metatdata value to {self.start}\"\n )\n self.logger.warning(msg)\n self.run_metadata.time_period.start = self.start.iso_str\n\n # check end time\n if self.end != self.run_metadata.time_period.end:\n if (\n self.run_metadata.time_period.end\n != \"1980-01-01T00:00:00+00:00\"\n ):\n msg = (\n f\"end time of dataset {self.end} does not \"\n f\"match 
metadata end {self.run_metadata.time_period.end} \"\n f\"updating metatdata value to {self.end}\"\n )\n self.logger.warning(msg)\n self.run_metadata.time_period.end = self.end.iso_str\n if self.sample_rate != self.run_metadata.sample_rate:\n if self.run_metadata.sample_rate == 0.0:\n pass\n elif self.run_metadata.sample_rate is not None:\n msg = (\n f\"sample rate of dataset {self.sample_rate} does not \"\n f\"match metadata sample rate {self.run_metadata.sample_rate} \"\n f\"updating metatdata value to {self.sample_rate}\"\n )\n self.logger.warning(msg)\n self.run_metadata.sample_rate = self.sample_rate\n\n if self.run_metadata.id not in self.station_metadata.runs.keys():\n self.station_metadata.runs[0].update(self.run_metadata)\n\n self.station_metadata.update_time_period()\n self.survey_metadata.update_time_period()", "def test_api_can_get_metadata(self):\n response = self.client.get('/metadata/', format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def collect_result_metadata(metadata):\n try:\n yield\n metadata['test_status'] = 'succeeded'\n except Exception as exn:\n metadata['test_status'] = 'failed'\n metadata['exception'] = {\n 'class': exn.__class__.__name__,\n 'message': str(exn)\n }\n raise", "def check_response_errors(self, resp):\n return True", "def test_that_metadata_is_fetched(self):\n # Get value of 'param' parameter.\n param_name = 'param'\n metadata_url = '%s/%s' % (\n gce_metadata_services.METADATA_ATTRIBUTES_URL, param_name)\n metadata_headers = gce_metadata_services.METADATA_HEADERS\n metadata_value = 'value'\n\n with self.put_get_request(\n metadata_url, metadata_value, 200, metadata_headers):\n value = gce_metadata_services.get_metadata_param(param_name)\n self.assertEqual(value, 'value')", "async def async_update_from_icecast(self):\n if self._icecast_meta == 'Off':\n return True\n\n #_LOGGER.debug('For: %s Looking for IceCast metadata in: %s', self._name, self._media_uri_final)\n\n def NiceToICY(self):\n class InterceptedHTTPResponse():\n pass\n import io\n line = self.fp.readline().replace(b\"ICY 200 OK\\r\\n\", b\"HTTP/1.0 200 OK\\r\\n\")\n InterceptedSelf = InterceptedHTTPResponse()\n InterceptedSelf.fp = io.BufferedReader(io.BytesIO(line))\n InterceptedSelf.debuglevel = self.debuglevel\n InterceptedSelf._close_conn = self._close_conn\n return ORIGINAL_HTTP_CLIENT_READ_STATUS(InterceptedSelf)\n\n ORIGINAL_HTTP_CLIENT_READ_STATUS = urllib.request.http.client.HTTPResponse._read_status\n urllib.request.http.client.HTTPResponse._read_status = NiceToICY\n\n try:\n request = urllib.request.Request(self._media_uri_final, headers={'Icy-MetaData': '1','User-Agent': 'VLC/3.0.16 LibVLC/3.0.16'}) # request metadata\n response = await self.hass.async_add_executor_job(urllib.request.urlopen, request)\n except: # (urllib.error.HTTPError)\n _LOGGER.debug('For: %s Metadata error: %s', self._name, self._media_uri_final)\n self._media_title = None\n self._media_artist = None\n self._icecast_name = None\n self._media_image_url = None\n return True\n\n icy_name = response.headers['icy-name']\n if icy_name is not None and icy_name != 'no name' and icy_name != 'Unspecified name' and icy_name != '-':\n try: # 'latin1' # default: iso-8859-1 for mp3 and utf-8 for ogg streams\n self._icecast_name = icy_name.encode('latin1').decode('utf-8')\n except (UnicodeDecodeError):\n self._icecast_name = icy_name\n #_LOGGER.debug('For: %s found icy_name: %s', self._name, '\"' + icy_name + '\"')\n\n else:\n self._icecast_name = None\n\n if self._icecast_meta == 
'StationName':\n self._media_title = self._icecast_name\n self._media_artist = None\n self._media_image_url = None\n return True\n\n icy_metaint_header = response.headers['icy-metaint']\n if icy_metaint_header is not None:\n metaint = int(icy_metaint_header)\n for _ in range(10): # title may be empty initially, try several times\n response.read(metaint) # skip to metadata\n metadata_length = struct.unpack('B', response.read(1))[0] * 16 # length byte\n metadata = response.read(metadata_length).rstrip(b'\\0')\n #_LOGGER.debug('For: %s found metadata: %s', self._name, metadata)\n\n # extract title from the metadata\n # m = re.search(br\"StreamTitle='([^']*)';\", metadata)\n m = re.search(br\"StreamTitle='(.*)';\", metadata)\n #_LOGGER.debug('For: %s found m: %s', self._name, m)\n if m:\n title = m.group(0)\n #_LOGGER.debug('For: %s found title: %s', self._name, title)\n\n if title:\n code_detect = chardet.detect(title)['encoding']\n title = title.decode(code_detect, errors='ignore')\n titlek = title.split(\"';\")\n title = titlek[0]\n titlem = title.split(\"='\")\n title = titlem[1]\n #_LOGGER.debug('For: %s found decoded title: %s', self._name, title)\n\n title = re.sub(r'\\[.*?\\]\\ *', '', title) # \"\\s*\\[.*?\\]\\s*\",\" \",title)\n if title.find('~~~~~') != -1: # for United Music Subasio servers\n titles = title.split('~')\n self._media_artist = string.capwords(titles[0].strip().strip('-'))\n self._media_title = string.capwords(titles[1].strip().strip('-'))\n elif title.find(' - ') != -1: # for ordinary Icecast servers\n titles = title.split(' - ')\n self._media_artist = string.capwords(titles[0].strip().strip('-'))\n self._media_title = string.capwords(titles[1].strip().strip('-'))\n else:\n if self._icecast_name is not None:\n self._media_artist = '[' + self._icecast_name + ']'\n else:\n self._media_artist = None\n self._media_title = string.capwords(title)\n\n if self._media_artist == '-':\n self._media_artist = None\n if self._media_title == '-':\n self._media_title = None\n\n if self._media_artist is not None:\n self._media_artist.replace('/', ' / ')\n self._media_artist.replace(' ', ' ')\n\n if self._media_title is not None:\n self._media_title.replace('/', ' / ')\n self._media_title.replace(' ', ' ')\n\n break\n else:\n if self._icecast_name is not None:\n self._media_title = self._icecast_name\n else:\n self._media_title = None\n self._media_artist = None\n self._media_image_url = None\n\n else:\n if self._icecast_name is not None:\n self._media_title = self._icecast_name\n else:\n self._media_title = None\n self._media_artist = None\n self._media_image_url = None\n\n #_LOGGER.debug('For: %s stated media_title: %s', self._name, self._media_title)\n #_LOGGER.debug('For: %s stated media_artist: %s', self._name, self._media_artist)", "def test_errors_on_metadata(self):\n mb = self.maria_backup\n\n # correct run\n sample_xtrabackup_info = \"\"\"\n uuid = a6336c86-1b33-11e8-8de6-080027f3a5d8\n tool_name = xtrabackup\n tool_command = –backup –target-dir=/Backups/base –datadir=/var/lib/mysql –user=root –password=… –galera-info\n tool_version = 2.4.9\n ibbackup_version = 2.4.9\n server_version = 5.7.19-17-57-log\n start_time = 2022-02-26 15:28:22\n end_time = 2022-02-26 15:28:48\n \"\"\"\n mock = mock_open(read_data=sample_xtrabackup_info)\n with patch('builtins.open', mock):\n self.assertFalse(mb.errors_on_metadata(''))\n\n # missing end_time\n sample_xtrabackup_info = \"\"\"\n uuid = a6336c86-1b33-11e8-8de6-080027f3a5d8\n tool_name = xtrabackup\n tool_command = –backup 
–target-dir=/Backups/base –datadir=/var/lib/mysql –user=root –password=… –galera-info\n tool_version = 2.4.9\n ibbackup_version = 2.4.9\n server_version = 5.7.19-17-57-log\n start_time = 2022-02-26 15:28:22\n \"\"\"\n mock = mock_open(read_data=sample_xtrabackup_info)\n with patch('builtins.open', mock):\n self.assertTrue(mb.errors_on_metadata(''))\n\n # simulate open error\n mock = mock_open(read_data=\"\")\n with patch('builtins.open', mock) as mocked_open:\n mocked_open.side_effect = OSError\n self.assertTrue(mb.errors_on_metadata(''))\n\n # simulate read error\n mock = mock_open(read_data=\"\")\n with patch('builtins.open', mock) as mocked_open:\n mocked_file = mocked_open.return_value\n mocked_file.read.side_effect = IOError\n self.assertTrue(mb.errors_on_metadata(''))", "def _extract_publication_metadata(self, feed, publication, data_source_name):\n self._logger.debug(\n \"Started extracting metadata from publication {0}\".format(\n encode(publication)\n )\n )\n\n title = publication.metadata.title\n\n if title == OPDSFeed.NO_TITLE:\n title = None\n\n subtitle = publication.metadata.subtitle\n\n languages = first_or_default(publication.metadata.languages)\n derived_medium = self._extract_medium_from_links(publication.links)\n medium = self._extract_medium(publication, derived_medium)\n\n publisher = first_or_default(publication.metadata.publishers)\n if publisher:\n publisher = publisher.name\n\n imprint = first_or_default(publication.metadata.imprints)\n if imprint:\n imprint = imprint.name\n\n published = publication.metadata.published\n subjects = self._extract_subjects(publication.metadata.subjects)\n contributors = (\n self._extract_contributors(\n publication.metadata.authors, Contributor.AUTHOR_ROLE\n )\n + self._extract_contributors(\n publication.metadata.translators, Contributor.TRANSLATOR_ROLE\n )\n + self._extract_contributors(\n publication.metadata.editors, Contributor.EDITOR_ROLE\n )\n + self._extract_contributors(\n publication.metadata.artists, Contributor.ARTIST_ROLE\n )\n + self._extract_contributors(\n publication.metadata.illustrators, Contributor.ILLUSTRATOR_ROLE\n )\n + self._extract_contributors(\n publication.metadata.letterers, Contributor.LETTERER_ROLE\n )\n + self._extract_contributors(\n publication.metadata.pencilers, Contributor.PENCILER_ROLE\n )\n + self._extract_contributors(\n publication.metadata.colorists, Contributor.COLORIST_ROLE\n )\n + self._extract_contributors(\n publication.metadata.inkers, Contributor.INKER_ROLE\n )\n + self._extract_contributors(\n publication.metadata.narrators, Contributor.NARRATOR_ROLE\n )\n + self._extract_contributors(\n publication.metadata.contributors, Contributor.CONTRIBUTOR_ROLE\n )\n )\n\n feed_self_url = first_or_default(\n feed.links.get_by_rel(OPDS2LinkRelationsRegistry.SELF.key)\n ).href\n links = self._extract_links(publication, feed_self_url)\n\n last_opds_update = publication.metadata.modified\n\n identifier = self._extract_identifier(publication)\n identifier_data = IdentifierData(\n type=identifier.type, identifier=identifier.identifier\n )\n\n # FIXME: There are no measurements in OPDS 2.0\n measurements = []\n\n # FIXME: There is no series information in OPDS 2.0\n series = None\n series_position = None\n\n # FIXME: It seems that OPDS 2.0 spec doesn't contain information about rights so we use the default one\n rights_uri = RightsStatus.rights_uri_from_string(\"\")\n\n circulation_data = CirculationData(\n default_rights_uri=rights_uri,\n data_source=data_source_name,\n 
primary_identifier=identifier_data,\n links=links,\n licenses_owned=LicensePool.UNLIMITED_ACCESS,\n licenses_available=LicensePool.UNLIMITED_ACCESS,\n licenses_reserved=0,\n patrons_in_hold_queue=0,\n formats=[],\n )\n\n formats = self._find_formats_in_non_open_access_acquisition_links(\n publication.links, links, rights_uri, circulation_data\n )\n circulation_data.formats.extend(formats)\n\n metadata = Metadata(\n data_source=data_source_name,\n title=title,\n subtitle=subtitle,\n language=languages,\n medium=medium,\n publisher=publisher,\n published=published,\n imprint=imprint,\n primary_identifier=identifier_data,\n subjects=subjects,\n contributors=contributors,\n measurements=measurements,\n series=series,\n series_position=series_position,\n links=links,\n data_source_last_updated=last_opds_update,\n circulation=circulation_data,\n )\n\n self._logger.debug(\n \"Finished extracting metadata from publication {0}: {1}\".format(\n encode(publication), encode(metadata)\n )\n )\n\n return metadata", "def test_validate_metadata_no_samples(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-no-samples-batch\",\n \"samples\": [],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNotNone(reason)\n\n # should call to slack webhook once\n verify(libslack.http.client.HTTPSConnection, times=1).request(...)", "def test_handle_part__url_expired_twice(self, syn):\n\n upload = self._init_upload_attempt(syn)\n upload._upload_id = \"123\"\n part_number = 1\n chunk = b\"1\" * TestUploadAttempt.part_size\n\n pre_signed_url_1 = \"https://foo.com/1\"\n pre_signed_url_2 = \"https://bar.com/1\"\n signed_headers = {\"a\": 1}\n\n upload._pre_signed_part_urls = {part_number: (pre_signed_url_1, signed_headers)}\n mock_session = mock.Mock()\n\n with mock.patch.object(\n multipart_upload, \"_get_file_chunk\"\n ) as chunk_fn, mock.patch.object(\n upload, \"_get_thread_session\"\n ) as get_session, mock.patch.object(\n upload, \"_refresh_pre_signed_part_urls\"\n ) as refresh_urls:\n get_session.return_value = mock_session\n chunk_fn.return_value = chunk\n refresh_urls.side_effect = [\n (url, signed_headers)\n for url in [\n pre_signed_url_1,\n pre_signed_url_2,\n ]\n ]\n\n mock_response = mock.MagicMock(spec=requests.Response)\n mock_response.status_code = 403\n mock_response.headers = {}\n mock_response.reason = \"\"\n mock_session.put.return_value = mock_response\n\n with pytest.raises(SynapseHTTPError):\n upload._handle_part(1)", "def test_read_unexpected_error(self, data, requests_mock, capsys):\n requests_mock.get(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.read(data_url)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def parse_remote_metadata(self, timeout=30):\n for metadataUrl in self.metadataUrls:\n if (\n metadataUrl[\"url\"] is not None and metadataUrl[\"format\"].lower() == \"text/xml\"\n ):\n 
try:\n content = openURL(metadataUrl[\"url\"], timeout=timeout, headers=self.headers, auth=self.auth)\n doc = etree.fromstring(content.read())\n\n if metadataUrl[\"type\"] == \"FGDC\":\n mdelem = doc.find(\".//metadata\")\n if mdelem is not None:\n metadataUrl[\"metadata\"] = Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n elif metadataUrl[\"type\"] in [\"TC211\", \"19115\", \"19139\"]:\n mdelem = doc.find(\n \".//\" + nspath_eval(\"gmd:MD_Metadata\", namespaces)\n ) or doc.find(\n \".//\" + nspath_eval(\"gmi:MI_Metadata\", namespaces)\n )\n if mdelem is not None:\n metadataUrl[\"metadata\"] = MD_Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n except Exception:\n metadataUrl[\"metadata\"] = None", "def test_metadata(self):\n cr = CaseReader(self.filename)\n self.assertEqual(cr.format_version, format_version,\n msg='incorrect format version')\n self.assertIsNone(cr.parameters,\n msg='parameter metadata should be None')\n self.assertIsNone(cr.unknowns, msg='unknown metadata should be None')", "def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])", "async def test_fetch_dataset_metadata_call(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection()\n result = await fetch_dataset_metadata(pool, None, None)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, [])", "def test_validate_metadata_no_override_cycles(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-no-override-cycles-batch\",\n \"samples\": [\n \"PTC_EXPn200908LL_L2000001\",\n \"PTC_EXPn200908LL_L2000002\",\n \"PTC_EXPn200908LL_L2000003\"\n ],\n \"settings\": {\n \"adapter_read_1\": \"AAAACAACT\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNotNone(reason)\n\n # should call to slack webhook once\n verify(libslack.http.client.HTTPSConnection, times=1).request(...)", "def on_failure(self, exc: BaseException) -> None:", "def test_get_metadata_value(self):\n pass", "def test_obtain_issues_http_error(self, mock_url_read):\n mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n 
self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0", "def _verify(self) -> None:\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.data_dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.data_dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n self._extract()", "def populate_initial_valid_metadata(self):\n pass", "async def _handle_fetch_failure(self, title, description):\n\n if not self.group.get('alert_on_failure', False):\n return\n\n # if we've been asked to alert on failure, we create\n # a fake event (as a Box) and alert with it\n now = pendulum.now('UTC')\n await self.alert(Box({\n 'title': title,\n 'description': description,\n 'published': now,\n 'datestring': self.format_timestamp_local(now),\n }))", "def _download_metadata():\n if not os.path.isfile(L1000FWD_METADATA):\n if not os.path.exists('L1000FWD'):\n os.mkdir('L1000FWD')\n response = requests.get('https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv', stream=True)\n if response.status_code != 200:\n raise Exception('This should not happen')\n with open(L1000FWD_METADATA, 'wb') as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n outfile.write(chunk)", "def test_obtain_issue_ssast_report_not_created(self, mock_sleep, mock_error, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}'] + ['{\"status\": {\"value\": \"InProgress\"}}'] * 5\n self.__report.obtain_issues(['id'], 'high')\n\n issues = self.__report.issues()\n\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)\n mock_error.assert_called_once_with(\"SAST report is not created on the Checkmarx server!\")\n mock_sleep.assert_called()", "def test_get_server_metadata_item(self):\n metadata_response = self.servers_client.get_server_metadata_item(\n self.server.id, 'meta_key_1')\n metadata = metadata_response.entity\n self.assertEqual(metadata.get('meta_key_1'), 'meta_value_1')", "def _check_200(self, response):\n if response.code != 200:\n raise YubiKeyVerificationError(\n \"Received {0} response.\".format(response.code))\n return response", "def test_private_fetch_law_exception(self, mock_requests):\n\n mock_requests.get.return_value.status_code = 404\n setattr(self.law, 'silent', True)\n with self.assertRaises(SystemExit):\n self.law._fetch_law(('test', 'test'), Mock())", "def _identify_fail(failure):\n logger.warning(failure.getErrorMessage())\n logger.warning(\"Failed to setup & obtain identity\")\n return", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n 
kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_auth_failure_xml(self):\n\n config = get_config()\n\n if config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey') + \"1234\",\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp\n )\n\n self.assertEqual(\n response,\n None,\n \"Authentication did not return 'None', but %s instead.\" % (\n response\n )\n )", "def test_update_metadata(self):\n pass", "def test_get_model_metadata_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name=\"asdf\")\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def _download_metadata(track_id, dataset_version):\n metadata_path = os.path.join(METADATA_PATH, _METADATA_FMT % track_id)\n if os.path.exists(metadata_path):\n return True\n\n try:\n top_folderid = GDRIVE_FOLDERS[dataset_version]\n except KeyError:\n raise IOError(\"Unable to find data in Google Drive for this version.\")\n\n file_list = get_named_child(top_folderid, track_id)\n correct_file = [f for f in file_list if f['title'] == track_id]\n\n if len(correct_file) == 0:\n raise IOError(\"Could not find multitrack\")\n else:\n mtrack_file = correct_file[0]\n\n metadata_file_list = get_named_child(mtrack_file['id'], 'METADATA')\n if len(metadata_file_list) > 0:\n metadata_file = metadata_file_list[0]\n else:\n folder_file_list = get_files_in_folder(mtrack_file['id'])\n print(len(folder_file_list))\n for fobject in folder_file_list:\n print(fobject['title'])\n raise IOError(\"Could not find Metadata\")\n\n download_file(metadata_file['id'], metadata_path)\n\n 
DOWNLOADED_FILEPATHS.append(metadata_path)\n\n return True", "def test_fetch_error_valid():\n ident = _id()\n proj.fetch('always_error', ident)\n error = proj.fetch_error('always_error', ident)\n assert \"This is an error!\" in error", "def test_metadata_invalid(aquarius_instance):\n result, errors = aquarius_instance.validate_metadata(\n {\"some_dict\": \"that is invalid\"}\n )\n assert result is False\n assert errors[0][\"message\"] == \"'main' is a required property\"", "def _ensure_not_expired(self, metadata_role):\n \n # Construct the full metadata filename and the location of its\n # current path. The current path of 'metadata_role' is needed\n # to log the exact filename of the expired metadata.\n metadata_filename = metadata_role + '.txt'\n rolepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n rolepath = os.path.abspath(rolepath)\n \n # Extract the expiration time.\n expires = self.metadata['current'][metadata_role]['expires']\n \n # If the current time has surpassed the expiration date, raise\n # an exception. 'expires' is in YYYY-MM-DD HH:MM:SS format, so\n # convert it to seconds since the epoch, which is the time format\n # returned by time.time() (i.e., current time), before comparing.\n current_time = time.time()\n expiry_time = tuf.formats.parse_time(expires)\n if expiry_time < current_time:\n logger.error('Metadata '+repr(rolepath)+' expired on '+repr(expires)+'.')\n raise tuf.ExpiredMetadataError(expires)", "def testsetMetadata(self):\n handler = Handler(self.tmp_url, \"\", \"png\", **self.kw)\n self.assertRaises(NotImplementedError, handler.setMetadata)", "def check_dataset(self):\n url = self.metadata.metadata_url\n document = Document.objects.get(\n metadata=self.metadata,\n document_type=DocumentEnum.METADATA.value,\n is_original=True,\n )\n original_document = document.content\n self.check_document(url, original_document)", "async def test_update_with_failed_get(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n headers={\"content-type\": \"text/xml\"},\n content=\"\",\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.toplevel.master_value }}\",\n \"json_attributes\": [\"key\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n state = hass.states.get(\"sensor.foo\")\n\n assert state.state == STATE_UNKNOWN\n assert \"REST xml result could not be parsed\" in caplog.text\n assert \"Empty reply\" in caplog.text", "def _parse_take_response(self, response: str) -> Optional[Package]:\n for source_package, descriptor in yaml.safe_load(response)[0].items():\n data = {} # type: Dict[str, str]\n for elem in descriptor:\n for k, v in elem.items():\n data[k] = v\n break\n if data['status'] != 'ok':\n return None\n return Package(self, source_package, data)", "def _raise_if_error(response):\n if response.status_code != 200:\n raise SimpleHTTPException(response)", "def test_get_data_fail(self):\n self.assertIsNone(get_data('this_must_fail', 5, 0))", "def test_handler_metadata_validation_fail(self):\n\n # This will fail metadata validation since there exists no samples\n 
when(liborca).get_sample_names_from_samplesheet(...).thenReturn(\n [\n \"\"\n ]\n )\n\n result = bcl_convert.handler({\n 'gds_volume_name': \"gds_volume_name\",\n 'gds_folder_path': \"gds_folder_path\",\n 'seq_run_id': \"mock_sqr.run_id\",\n 'seq_name': \"mock_sqr.name\",\n }, None)\n\n logger.info(\"-\" * 32)\n logger.info(\"Example bcl_convert.handler lambda output:\")\n logger.info(json.dumps(result))\n\n # assert bcl convert workflow runs 0 in db\n no_bcl_convert_workflow_runs = Workflow.objects.all()\n self.assertEqual(0, no_bcl_convert_workflow_runs.count())", "def process_response(self, resp):\n status_code = self.get_status_code(resp)\n if not 200 <= status_code < 300:\n return resp\n\n try:\n (subject_id, method, version) = self._fetch_request_info(\n resp.request)\n except TypeError:\n return resp\n\n if method == 'GET' and status_code == 204:\n # Bugfix:1251055 - Don't cache non-existent subject files.\n # NOTE: Both GET for an subject without locations and DELETE return\n # 204 but DELETE should be processed.\n return resp\n\n method_str = '_process_%s_response' % method\n try:\n process_response_method = getattr(self, method_str)\n except AttributeError:\n LOG.error(_LE('could not find %s') % method_str)\n # Nothing to do here, move along\n return resp\n else:\n return process_response_method(resp, subject_id, version=version)", "def test_parse_image_meta_orphan(image_orphan):\n ret = {\"Error\": \"This looks like an orphaned image, image payload was invalid.\"}\n assert _parse_image_meta(image_orphan, True) == ret", "def test_get_provider_traits_error(self, log_mock):\n uuid = uuids.compute_node\n resp_mock = mock.Mock(headers={\n 'x-openstack-request-id': uuids.request_id})\n self.ks_adap_mock.get.return_value = resp_mock\n\n for status_code in (400, 404, 503):\n resp_mock.status_code = status_code\n self.assertRaises(\n exception.ResourceProviderTraitRetrievalFailed,\n self.client.get_provider_traits, self.context, uuid)\n\n expected_url = '/resource_providers/' + uuid + '/traits'\n self.ks_adap_mock.get.assert_called_once_with(\n expected_url,\n global_request_id=self.context.global_id,\n **self.trait_api_kwargs)\n self.assertTrue(log_mock.called)\n self.assertEqual(uuids.request_id,\n log_mock.call_args[0][1]['placement_req_id'])\n self.ks_adap_mock.get.reset_mock()\n log_mock.reset_mock()", "def test_key_not_found(self):\n self.expect_datatore_lookup('SomeBlobKey', False)\n self.mox.ReplayAll()\n self.assertResponse('404 %s' % httplib.responses[404], [], '', self.app,\n self._environ)", "def _verify(self) -> None:\n # Check if the files already exist\n if os.path.exists(os.path.join(self.root, self.image_root)):\n return\n\n # Check if .zip files already exists (if so extract)\n exists = []\n for filename, md5 in zip(self.filenames, self.md5s):\n filepath = os.path.join(self.root, filename)\n if os.path.isfile(filepath):\n if self.checksum and not check_integrity(filepath, md5):\n raise RuntimeError(\"Dataset found, but corrupted.\")\n exists.append(True)\n extract_archive(filepath)\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the user requested to download the dataset\n raise RuntimeError(\n \"Dataset not found in `root` directory, either specify a different\"\n + \" `root` directory or manually download the dataset to this directory.\"\n )", "def test_validate_metadata_blank_samples(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n 
settings_by_samples = [\n {\n \"batch_name\": \"my-batch\",\n \"samples\": [],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_samples)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNotNone(reason)\n\n # should call to slack webhook once\n verify(libslack.http.client.HTTPSConnection, times=1).request(...)", "def test_checkmarx_init_http_error(self, mock_url_read, mock_error):\n mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)\n marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec\n\n self.assertIsNotNone(marx)\n mock_url_read.assert_called_once_with(\n 'http://url/cxrestapi/auth/identity/connect/token',\n post_body=b'username=un&password=pwd&scope=sast_rest_api&grant_type=password&'\n b'client_id=resource_owner_client&client_secret=014DF517-39D1-4453-B7B3-9930C563627C')\n mock_error.assert_called_once_with(\"HTTP error during the retrieving of access token!\")", "def test_extract_per_individual_state_metadatum_from_sample_metadata_invalid(\r\n self):\r\n self.assertRaises(KeyError,\r\n extract_per_individual_state_metadatum_from_sample_metadata,\r\n self.individual_states_and_responses_map_f1,\r\n state_category=\"TreatmentState\",\r\n state_values=[\"Pre\", \"Post\"],\r\n individual_identifier_category=\"PersonalID\",\r\n metadata_category=\"some-non-existant-category\",\r\n process_f=float)", "def detail_errorback(self, failure):\n\t\tglobal exitCode\n\t\tif hasattr(failure.value,'reasons') and any(reason.type is OpenSSL.SSL.Error for reason in failure.value.reasons):\n\t\t\tmessage = 'SSL error on ' + failure.request.url\n\t\t\tif sys.platform == 'win32' and not os.environ.get('SSL_CERT_FILE'):\n\t\t\t\tmessage += '''\\nOn Windows, you may have to set environment variable \"SSL_CERT_FILE\" to the location of root certificates bundle.\nYou may find the location by running\n> import certifi\n> certifi.where()'''\n\t\t\t# https://github.com/pyca/pyopenssl/issues/823#issuecomment-468675241 explains On Windows pyOpenSSL doesn't ship with any trust roots\n\t\t\t# https://twistedmatrix.com/documents/current/api/twisted.internet.ssl.html#platformTrust read SSL_CERT_FILE environment variable.\n\n\t\t\tself.logger.error(message)\n\t\t\texitCode = GooglePlayAdvancedSearch.Errors.sslErrorCode\n\t\t\traise CloseSpider('SSL error on ' + failure.request.url)", "def _fetch(self):\n if self.closed():\n raise self.Error(\"Failed to read from closed connection {!r}\".format(self.server.address))\n if self.defunct():\n raise self.Error(\"Failed to read from defunct connection {!r}\".format(self.server.address))\n if not self.responses:\n return 0, 0\n\n self._receive()\n\n details, summary_signature, summary_metadata = self._unpack()\n\n if details:\n log_debug(\"S: RECORD * %d\", len(details)) # TODO\n self.responses[0].on_records(details)\n\n if summary_signature is None:\n return len(details), 0\n\n response = self.responses.popleft()\n response.complete = True\n if summary_signature == SUCCESS:\n log_debug(\"S: SUCCESS (%r)\", summary_metadata)\n response.on_success(summary_metadata or {})\n elif summary_signature == IGNORED:\n self._last_run_statement = None\n log_debug(\"S: IGNORED (%r)\", summary_metadata)\n response.on_ignored(summary_metadata or {})\n elif summary_signature == FAILURE:\n self._last_run_statement = None\n log_debug(\"S: FAILURE (%r)\", summary_metadata)\n response.on_failure(summary_metadata or {})\n 
else:\n self._last_run_statement = None\n raise ProtocolError(\"Unexpected response message with signature %02X\" % summary_signature)\n\n return len(details), 1", "def test_update_http_error(self, data_update, requests_mock, capsys):\n requests_mock.put(data_url, status_code=300)\n with pytest.raises(RuntimeError):\n r = operations.update(data_url, data=data_update)\n assert 'HTTP error: 300' in capsys.readouterr().out", "def test_default_unsuccessful_verify_request(self, cred):\n # make the initial request\n resp = requests.get(verify_url.format('xml', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'text/plain'\n assert resp.text.startswith('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n tree = ElementTree.fromstring(resp.text)\n assert tree[0].tag == 'request_id' and len(tree[0].text) <= 32\n assert tree[1].tag == 'status' and tree[1].text == '0'\n # now enter invalid verify code 3 times to terminate verification process\n # first invalid code check\n request_id = tree[0].text\n resp = requests.get(check_url.format('xml', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'text/plain'\n assert resp.text.startswith('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n tree = ElementTree.fromstring(resp.text)\n assert tree[0].tag == 'request_id' and tree[0].text == request_id\n assert tree[1].tag == 'status' and tree[1].text == '16'\n assert tree[2].tag == 'error_text' and tree[2].text == code_does_not_match_msg\n # second invalid check\n resp = requests.get(check_url.format('xml', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'text/plain'\n assert resp.text.startswith('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n tree = ElementTree.fromstring(resp.text)\n assert tree[0].tag == 'request_id' and tree[0].text == request_id\n assert tree[1].tag == 'status' and tree[1].text == '16'\n assert tree[2].tag == 'error_text' and tree[2].text == code_does_not_match_msg\n # third invalid check\n resp = requests.get(check_url.format('xml', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'text/plain'\n assert resp.text.startswith('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n tree = ElementTree.fromstring(resp.text)\n # assert 'request_id' not in [child.tag for child in tree]\n assert tree[1].tag == 'status' and tree[1].text == '17'\n assert tree[2].tag == 'error_text' and tree[2].text == workflow_terminated_msg", "def test_get_response_with_retry__error_status(self, mock_get_thread_session):\n mock_requests_error_response = mock.Mock(status_code=500)\n mock_requests_response = mock.Mock(status_code=206)\n mock_requests_session = mock.create_autospec(requests.Session)\n mock_requests_session.get.side_effect = [\n mock_requests_error_response,\n mock_requests_response,\n ]\n mock_get_thread_session.return_value = mock_requests_session\n\n mock_presigned_url_provider = mock.create_autospec(\n download_threads.PresignedUrlProvider\n )\n presigned_url_info = download_threads.PresignedUrlInfo(\n \"foo.txt\", \"synapse.org/foo.txt\", datetime.datetime.utcnow()\n )\n\n mock_presigned_url_provider.get_info.return_value = presigned_url_info\n start = 5\n end = 42\n\n mock_syn = mock.Mock(spec=Synapse)\n mock_executor = mock.Mock(spec=concurrent.futures.Executor)\n downloader = _MultithreadedDownloader(mock_syn, mock_executor, 5)\n assert (start, 
mock_requests_response) == downloader._get_response_with_retry(\n mock_presigned_url_provider, start, end\n )\n\n expected_get_call_args_list = [\n mock.call(presigned_url_info.url, headers={\"Range\": \"bytes=5-42\"})\n ] * 2\n assert mock_requests_session.get.call_args_list == expected_get_call_args_list", "def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint", "def _connect(self):\n if self.should_connect:\n if self.last_id:\n self.requests_kwargs['headers']['Last-Event-ID'] = self.last_id\n self.resp = self.session.get(self.url, stream=True, **self.requests_kwargs)\n self.resp_iterator = self.resp.iter_content(decode_unicode=True)\n self.resp.raise_for_status()\n else:\n raise StopIteration()", "def mora_assert(response):\n assert response.status_code in (200, 201, 400, 404), response.status_code\n if response.status_code == 400:\n # Check actual response\n assert (\n response.text.find(\"not give raise to a new registration\") > 0\n ), response.text\n logger.debug(\"Request had no effect\")\n return None", "def test_multiple_auth_failures(self):\n http = FakeHttp([(FakeResponse(401), {}), (FakeResponse(401), {})])\n self.mock.Http.return_value = http\n response, _ = http_utils.request('https://url/', configuration=self.config)\n\n # Ensure that all expected requests were made.\n self.assertEqual(http.replies, [])\n\n self.assertEqual(response.status, 401)", "def test_drs_get_object_failure(self, testapp, testing_download): # noQA fixture\n res = testapp.get(testing_download)\n drs_object_uri = res.json['uuid']\n\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/not_a_uri/access/https')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access/https')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access/')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/{drs_object_uri}/accesss/https')", "def test_index_manifest_packages_failure(data, gen3_index, gen3_auth, logfile):\n with patch(\n \"gen3.tools.indexing.index_manifest.Gen3Metadata.create\", MagicMock()\n ) as mock_mds_create:\n index_object_manifest(\n manifest_file=f\"{CURRENT_DIR}/test_data/{data['manifest']}\",\n auth=gen3_auth,\n commons_url=gen3_index.client.url,\n thread_num=1,\n replace_urls=False,\n submit_additional_metadata_columns=True,\n )\n mds_records = {\n kwargs[\"guid\"]: kwargs[\"metadata\"]\n for (_, kwargs) in mock_mds_create.call_args_list\n }\n assert len(mds_records) == 0\n\n indexd_records = {r[\"did\"]: r for r in gen3_index.get_all_records()}\n assert len(indexd_records) == 0\n\n for error in 
data[\"expected_error_msgs\"]:\n assert error in logfile.read()", "def scrape_story_metadata(self, story_id):\n url = '{0}/s/{1}'.format(self.base_url, story_id)\n result = requests.get(url)\n html = result.content\n #print html \n soup = BeautifulSoup(html, self.parser)\n\n # print soup\n try:\n pre_story_links = soup.find(id='pre_story_links').find_all('a')\n except AttributeError:\n pre_story_links = None\n if re.search(r\"var userid = (.*);\", str(soup)) is None:\n author_id = \"0\"\n else: \n author_id = int(re.search(r\"var userid = (.*);\", str(soup)).groups()[0]);\n #print re.search(r\"var title = (.*);\", str(soup))\n if re.search(r\"var title = (.*);\", str(soup)) is None:\n title = \"NO-TITLE\"\n else:\n title = re.search(r\"var title = (.*);\", str(soup)).groups()[0];\n title = unquote_plus(title)[1:-1]\n metadata_div = soup.find(id='profile_top')\n# times = metadata_div.find_all(attrs={'data-xutime':True})\n# metadata_text = metadata_div.find(class_='xgray xcontrast_txt').text\n# metadata_parts = metadata_text.split('-')\n# genres = self.get_genres(metadata_parts[2].strip())\n metadata = {\n 'id': story_id,\n# 'canon_type': pre_story_links[0].text,\n# 'canon': pre_story_links[1].text,\n 'author_id': author_id,\n 'title': title,\n# 'updated': int(times[0]['data-xutime']),\n# 'published': int(times[1]['data-xutime']),\n# 'lang': metadata_parts[1].strip(),\n# 'genres': genres\n }\n \"\"\"\n for parts in metadata_parts:\n parts = parts.strip()\n tag_and_val = parts.split(':')\n if len(tag_and_val) != 2:\n continue\n tag, val = tag_and_val\n tag = tag.strip().lower()\n if tag not in metadata:\n val = val.strip()\n try:\n val = int(val.replace(',', ''))\n metadata['num_'+tag] = val\n except:\n metadata[tag] = val\n if 'status' not in metadata:\n metadata['status'] = 'Incomplete'\n \"\"\"\n return metadata", "def test_http_error(self):\n self.assertRaises(HTTPError, lambda: self.d.artist(0).name)\n\n try:\n self.d.artist(0).name\n except HTTPError as e:\n self.assertEqual(e.status_code, 404)\n self.assertEqual('404: Resource not found.', str(e))", "def test_gus_bus_bad_payload_metadata():\n flowtask = FlowTaskFactory()\n flowtask.build_flow.build.repo.metadata = {\"key1\": True, \"key2\": \"hello, world\"}\n responses.add(\n \"POST\",\n f\"{settings.METACI_RELEASE_WEBHOOK_URL}/test-results\",\n json={\n \"detail\": [\n {\n \"loc\": [\"body\", \"build\", \"metadata\", \"productTagGusId\"],\n \"msg\": \"field required\",\n \"type\": \"value_error.missing\",\n }\n ]\n },\n status=422,\n )\n\n with temporary_dir() as output_dir:\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_with_failures.xml\",\n Path(output_dir) / \"output.xml\",\n )\n test_results = robot_importer.import_robot_test_results(flowtask, output_dir)\n with pytest.raises(Exception, match=\"value_error.missing\"):\n robot_importer.export_robot_test_results(flowtask, test_results)", "def indicate_failure(self):\n pass", "def test_update_metadata1(self):\n pass", "def on_failure(self):\n pass", "def samladsv3(self):\n try:\n # Get the federated credentials from the user\n print(\"[-] Get authentication token\")\n print(\"Email:\", end=' ')\n username = input()\n password = getpass.getpass()\n print('')\n\n # Initiate session handler\n session = requests.Session()\n\n # Programmatically get the SAML assertion\n # Opens the initial IdP url and follows all of the HTTP302 redirects, and\n # gets the resulting login page\n formresponse = session.get(idpentryurl, verify=sslverification)\n # Capture the idpauthformsubmiturl, which 
is the final url after all the 302s\n idpauthformsubmiturl = formresponse.url\n\n # Parse the response and extract all the necessary values\n # in order to build a dictionary of all of the form values the IdP expects\n formsoup = BeautifulSoup(formresponse.text, \"html.parser\")\n payload = {}\n\n for inputtag in formsoup.find_all(re.compile('(INPUT|input)')):\n name = inputtag.get('name','')\n value = inputtag.get('value','')\n if \"user\" in name.lower():\n #Make an educated guess that this is the right field for the username\n payload[name] = username\n elif \"email\" in name.lower():\n #Some IdPs also label the username field as 'email'\n payload[name] = username\n elif \"pass\" in name.lower():\n #Make an educated guess that this is the right field for the password\n payload[name] = password\n else:\n #Simply populate the parameter with the existing value (picks up hidden fields in the login form)\n payload[name] = value\n\n # Debug the parameter payload if needed\n # Use with caution since this will print sensitive output to the screen\n #print(payload)\n\n # Some IdPs don't explicitly set a form action, but if one is set we should\n # build the idpauthformsubmiturl by combining the scheme and hostname\n # from the entry url with the form action target\n # If the action tag doesn't exist, we just stick with the\n # idpauthformsubmiturl above\n for inputtag in formsoup.find_all(re.compile('(FORM|form)')):\n action = inputtag.get('action')\n loginid = inputtag.get('id')\n if (action and loginid == \"loginForm\"):\n parsedurl = urlparse(idpentryurl)\n idpauthformsubmiturl = parsedurl.scheme + \"://\" + parsedurl.netloc + action\n\n # Performs the submission of the IdP login form with the above post data\n response = session.post(\n idpauthformsubmiturl, data=payload, verify=sslverification)\n\n # Debug the response if needed\n #print(response.text)\n\n # Overwrite and delete the credential variables, just for safety\n username = '##############################################'\n password = '##############################################'\n del username\n del password\n\n # Decode the response and extract the SAML assertion\n soup = BeautifulSoup(response.text, \"html.parser\")\n assertion = ''\n\n # Look for the SAMLResponse attribute of the input tag (determined by\n # analyzing the debug print lines above)\n for inputtag in soup.find_all('input'):\n if(inputtag.get('name') == 'SAMLResponse'):\n #print(inputtag.get('value'))\n assertion = inputtag.get('value')\n\n # Better error handling is required for production use.\n if (assertion == ''):\n #TODO: Insert valid error checking/handling\n print('Response did not contain a valid SAML assertion')\n sys.exit(0)\n\n # Debug only\n #print(base64.b64decode(assertion))\n\n # Parse the returned assertion and extract the authorized roles\n awsroles = []\n root = ET.fromstring(base64.b64decode(assertion))\n for saml2attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):\n if (saml2attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role'):\n for saml2attributevalue in saml2attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):\n awsroles.append(saml2attributevalue.text)\n\n # Note the format of the attribute value should be role_arn,principal_arn\n # but lots of blogs list it as principal_arn,role_arn so let's reverse\n # them if needed\n for awsrole in awsroles:\n chunks = awsrole.split(',')\n if'saml-provider' in chunks[0]:\n newawsrole = chunks[1] + ',' + chunks[0]\n index = 
awsroles.index(awsrole)\n awsroles.insert(index, newawsrole)\n awsroles.remove(awsrole)\n\n # If I have more than one role, ask the user which one they want,\n # otherwise just proceed\n print(\"\")\n if len(awsroles) > 1:\n i = 0\n print(\"Please choose the role you would like to assume:\")\n for awsrole in awsroles:\n print('[', i, ']: ', awsrole.split(',')[0])\n i += 1\n print(\"Selection: \", end=' ')\n selectedroleindex = input()\n\n # Basic sanity check of input\n if int(selectedroleindex) > (len(awsroles) - 1):\n print('You selected an invalid role index, please try again')\n sys.exit(0)\n\n role_arn = awsroles[int(selectedroleindex)].split(',')[0]\n principal_arn = awsroles[int(selectedroleindex)].split(',')[1]\n else:\n role_arn = awsroles[0].split(',')[0]\n principal_arn = awsroles[0].split(',')[1]\n\n # Use the assertion to get an AWS STS token using Assume Role with SAML\n conn = boto3.client('sts', region_name=region)\n token = conn.assume_role_with_saml(RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=assertion)\n\n # Read in the existing config file\n config = configparser.RawConfigParser()\n config.read(credentials)\n\n # Put the credentials into a saml specific section instead of clobbering\n # the default credentials\n if not config.has_section('saml'):\n config.add_section('saml')\n\n config['saml']['output'] = outputformat\n config['saml']['region'] = region\n config['saml']['aws_access_key_id'] = token['Credentials']['AccessKeyId']\n config['saml']['aws_secret_access_key'] = token['Credentials']['SecretAccessKey']\n config['saml']['aws_session_token'] = token['Credentials']['SessionToken']\n\n # Write the updated config file\n with open(credentials, 'w+') as configfile:\n config.write(configfile)\n\n # Give the user some basic info as to what has just happened\n print('\\n\\n----------------------------------------------------------------')\n print('Your new access key pair has been stored in the AWS configuration file {0} under the saml profile.'.format(credentials))\n print('Note that it will expire at {0}.'.format(token['Credentials']['Expiration'].astimezone(get_localzone())))\n print('After this time, you may safely rerun this script to refresh your access key pair.')\n print('To use this credential, call the AWS CLI with the --profile option (e.g. aws --profile saml ec2 describe-instances).')\n print('----------------------------------------------------------------\\n\\n')\n\n return samladsv3\n\n except Exception as e:\n print(\"Error while getting authentication token. 
%s\" % e)", "def test_fetch_metadata_for_dataset(self):\n\n with patch.object(pd, \"read_csv\") as func:\n func.return_value = pd.DataFrame(\n {\"Archive Link\": [\"test2\", \"test1\", \"test3\"],\n \"Update Date\": [\"2020/1/2\", \"2020/1/1\", \"2020/1/3\"]}\n )\n result = Network.fetch_metadata_for_dataset(\"test\")\n pd.testing.assert_frame_equal(\n result,\n pd.DataFrame(\n {\"Archive Link\": [\"test1\", \"test2\", \"test3\"],\n \"Update Date\": pd.date_range(\"2020/1/1\", \"2020/1/3\")}\n ).set_index(\"Update Date\")\n )\n func.assert_called_once_with(\n \"https://healthdata.gov/api/views/test/rows.csv\",\n dtype=str\n )", "def process_metadata(self):\n\n self._responses = self._get_responses()\n\n (\n self._request_body_parameter,\n self._request_body_class,\n self._request_body_content_types,\n ) = self._get_request_body_parameter()\n\n if self._request_body_content_types is None:\n self._request_body_content_types = [\"application/json\"]\n\n self._request_body_file_type = self._get_request_body_file_type()\n if self._request_body_parameter is not None and self._request_body_file_type is not None:\n raise TypeError(\"An endpoint cannot accept both a file and a model\")\n\n self._query_parameters = dict(self._get_query_string_parameters())\n self._path_parameters = dict(self._get_path_parameters())\n\n self._security = [*self._get_security_requirements()]\n self._tags = [*self._get_tags()]", "def _handle_creation_failure(session: Session, stub_dataset: Dataset, error: str):\n try:\n dataset.delete(session, stub_dataset)\n except requests.HTTPError:\n raise CreationFailure(\n f\"Created dataset did not delete after an earlier error: {error}\"\n )\n raise CreationFailure(error)" ]
[ "0.6340328", "0.6015547", "0.5980344", "0.58028024", "0.57724714", "0.5630444", "0.5626857", "0.55482787", "0.5516507", "0.5385363", "0.5247033", "0.51967573", "0.5134511", "0.51058185", "0.50990194", "0.5066909", "0.5056259", "0.5038707", "0.5012003", "0.49886125", "0.49666503", "0.4965181", "0.4964737", "0.49085736", "0.4903123", "0.48882276", "0.48471063", "0.4804742", "0.47837415", "0.47789124", "0.47734392", "0.47574466", "0.47304463", "0.47243202", "0.47199103", "0.47069302", "0.4700212", "0.46799415", "0.46786487", "0.466371", "0.46503222", "0.46463794", "0.46461836", "0.46316928", "0.4628741", "0.46279156", "0.46223867", "0.46181655", "0.46173167", "0.4606933", "0.46057907", "0.46033797", "0.46021053", "0.45994303", "0.45984042", "0.45943445", "0.4592716", "0.45786223", "0.45725146", "0.4557051", "0.45506915", "0.45435563", "0.45422688", "0.4541187", "0.45376563", "0.45353526", "0.45344484", "0.45295632", "0.45240775", "0.4520043", "0.45180368", "0.45121068", "0.45050535", "0.44938064", "0.4493519", "0.4491938", "0.44912997", "0.4479347", "0.4468931", "0.4466019", "0.44642085", "0.44379732", "0.44374132", "0.443049", "0.44246113", "0.44244233", "0.44217604", "0.44215205", "0.44213712", "0.44209898", "0.44160908", "0.44155112", "0.44154465", "0.44136855", "0.4410252", "0.44053796", "0.44040814", "0.4402707", "0.4396788", "0.43924326", "0.43873757" ]
0.0
-1
Ensure that if there's an HTTP failure while fetching user details from the SAP SuccessFactors OData API, registration falls back to the user details taken from the SAML assertion and the failure is logged.
def test_register_http_failure_in_odata(self): # Because we're getting details from the assertion, fall back to the initial set of details. self.USER_EMAIL = "[email protected]" self.USER_NAME = "Me Myself And I" self.USER_USERNAME = "myself" odata_company_id = 'NCC1701D' odata_api_root_url = 'http://api.successfactors.com/odata/v2/' mocked_odata_api_url = self._mock_odata_api_for_error(odata_api_root_url, self.USER_USERNAME) self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps({ 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': odata_api_root_url, 'odata_company_id': odata_company_id, 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', }) ) with LogCapture(level=logging.WARNING) as log_capture: self._test_register() logging_messages = str([log_msg.getMessage() for log_msg in log_capture.records]).replace('\\', '') assert odata_company_id in logging_messages assert mocked_odata_api_url in logging_messages assert self.USER_USERNAME in logging_messages assert 'SAPSuccessFactors' in logging_messages assert 'Error message' in logging_messages assert 'System message' in logging_messages assert 'Headers' in logging_messages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mock_odata_api_for_error(self, odata_api_root_url, username):\n\n def callback(request, uri, headers): # lint-amnesty, pylint: disable=unused-argument\n \"\"\"\n Return a 500 error when someone tries to call the URL.\n \"\"\"\n headers['CorrelationId'] = 'aefd38b7-c92c-445a-8c7a-487a3f0c7a9d'\n headers['RequestNo'] = '[787177]' # This is the format SAPSF returns for the transaction request number\n return 500, headers, 'Failure!'\n\n fields = ','.join(SapSuccessFactorsIdentityProvider.default_field_mapping.copy())\n url = '{root_url}User(userId=\\'{user_id}\\')?$select={fields}'.format(\n root_url=odata_api_root_url,\n user_id=username,\n fields=fields,\n )\n httpretty.register_uri(httpretty.GET, url, body=callback, content_type='application/json')\n return url", "async def test_bad_retrieve_user_data(self, m):\n with self.assertRaises(aiohttp.web_exceptions.HTTPInternalServerError):\n await retrieve_user_data(\"bad_token\")", "def test_retrieve_user_unautherized(self):\n res = self.client.get(ME_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_login_request_with_an_incorrect_authentication(self):\n user_res = self.ph.create_user(self.test_user_name, self.test_user_password)\n self.assertEqual(user_res.status_code, status.HTTP_201_CREATED)\n res = self.test_client.get(\n url_for('api.featurelistresource', _external=True))\n self.assertTrue(res.status_code == status.HTTP_401_UNAUTHORIZED)", "def test_unauthed_calls(self):\n expected_response = {\"errors\": [{\n \"message\": \"No valid client certificate presented\",\n \"status\": 401\n }],\n \"message\": \"No valid client certificate presented\"}\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/me',\n json=expected_response,\n status=401\n )\n resp = requests.get(f'{os.environ[\"AIVEN_API_URL\"]}/v1/me')\n\n assert resp.status_code == 401\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == f'{os.environ[\"AIVEN_API_URL\"]}/v1/me'\n assert responses.calls[0].response.json() == expected_response", "def test_api_ping_failed_nouser(self):\r\n res = self.testapp.get('/api/v1/ping?api_key=' + API_KEY,\r\n status=200)\r\n ping = json.loads(res.body)\r\n\r\n self.assertTrue(not ping['success'])\r\n self.assertEqual(ping['message'], \"Missing username in your api url.\")\r\n self._check_cors_headers(res)", "def test_get_single_user_is_missing(self):\n add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get('/users/999')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])", "def test_search_user_fail_on_non_200_response(self) -> None:\n responses.add(responses.GET, local_app.config['SEARCHSERVICE_BASE'] + SEARCH_USER_ENDPOINT,\n json=self.mock_search_table_results, status=HTTPStatus.INTERNAL_SERVER_ERROR)\n\n with local_app.test_client() as test:\n response = test.get('/api/search/v0/user', 
query_string=dict(query='test', page_index='0'))\n self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)", "async def test_bad_none_retrieve_user_data(self, m):\n m.get(\"http://test.csc.fi/userinfo\", payload={\"not_ga4gh\": [{}]})\n user_data = await retrieve_user_data(\"good_token\")\n self.assertEqual(user_data, None)", "def _process_fetch_failure(self):\n logger.info('DataFetcher: No valid result is received')\n if len(self.urls_processed) == len(self.urls):\n raise NoDataReceivedFromCaster()\n for _, error_code, error_text in self._curls_failed:\n if error_code == PYCURL_TIMEOUT_ERRNO:\n raise ExceededTimeoutError(error_text)\n if self._curls_failed:\n _, _, error_text = self._curls_failed[0]\n raise UnableToConnect(error_text)\n raise NoDataReceivedFromCaster()", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_user_fail_unauthorised():\n\n client = APIClient()\n\n response = client.get(reverse(\"user-detail\"), format=\"json\")\n assert response.status_code == status.HTTP_401_UNAUTHORIZED", "def test_request_users_user_invalid(self):\n response = requests.get(self.url + '/users/invalid')\n\n self.assertEqual(response.status_code, 404)", "def test_retrieve_user_unauthorized(self):\n # HTTP GET Request\n response = self.client.get(ME_URL)\n\n # If you call the URL without authorization\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_user_get_failure_using_basic_auth(self):\n # setup\n user = self.generate_username_password()\n resp = self.create_user(user)\n resp_body = resp.json()\n try:\n assert resp.status_code == 201\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body[\"username\"] == user[\"userName\"]\n assert resp_body[\"userID\"] != \"\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n uuid_ = str(uuid.uuid4())\n\n # test\n resp2 = self.get_user_basic_auth(uuid_, user)\n resp_body2 = resp2.json()\n assert resp2.status_code == 401\n assert resp2.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body2[\"code\"] == \"1207\"\n assert resp_body2[\"message\"] == \"User not found!\"\n\n # teardown:\n resp3 = self.delete_user_basic_auth(resp_body[\"userID\"], user)\n try:\n assert resp3.status_code == 204\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp3.request)\n self.pprint_response(resp3)", "def login_user_fails(self):\n response = self.client.post(self.login_url,\n self.invalid_user_login_details, format='json')\n return response", "def test_invalid_credentials_unauthorized(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPUnauthorized)\n self._check_response(response, 103)\n self.assertEqual(UserFitbit.objects.count(), 0)", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def error_on_unauthorized():\n\n username = get_jwt_identity()\n user = Login.query.filter_by(username=username).first()\n\n if user is None:\n raise 
APIError(400, \"User {username} does not exist on this server\".format(username=username))\n elif user.role is not Role.admin:\n raise APIError(401, \"Only administrators have access to this page\")", "def test_retrieve_users_unauthorized(setup_client):\n client = setup_client\n res = client.get(ME_URL)\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_get_bad_user(self):\r\n user = UserMgr.get(username=u'noexist')\r\n\r\n self.assertEqual(\r\n user,\r\n None,\r\n \"Should not find a non-existant user: \" + str(user))", "def test_api_user_get(self):\n pass", "def is_failed_user_data_retrieval(self):\n return self._tag == 'failed_user_data_retrieval'", "def test_no_user(self):\n self.request.user = None\n result = user_id_get_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_unknown_user(self):\n self.sign_in()\n response = self.client.get(reverse('backend:user_details', args=(0,)))\n self.assertEqual(response.status_code, 404)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def ping_missing_user(request):\r\n return _api_response(request, {\r\n 'success': False,\r\n 'message': 'Missing username in your api url.'\r\n })", "def test_error_inexistent_user_data(self, test_client, init_database, insert_user_db):\n login_url = '/api/v1/auth/login'\n login_data = {\n 'email': insert_user_db.email,\n 'password': 'password'\n }\n login_response = test_client.post(login_url, json=login_data)\n init_database.session.delete(insert_user_db)\n init_database.session.commit()\n\n url = '/api/v1/auth/me'\n headers = {\n 'Authorization': 'Bearer {}'.format(login_response.json['access'])\n }\n response = test_client.get(url, headers=headers)\n\n assert response.status_code == 404\n assert response.json['code'] == 'NOT_FOUND'\n assert response.json['message'] == 'User not found!'\n assert response.json['details'] == ['User not found!']", "def check_response_errors(self, resp):\n return True", "def failure(self, error):\n \n self.request.response.status_int = 400\n return None", "def check_auth_none(self, username):\n return AUTH_FAILED", "def test_bad_http(self):\n # Setup the mocked response\n responses.add(responses.GET, self.api_url, json=self.error_response,\n status=404, match_querystring=False)\n\n acme = ACMEAccount(client=self.client)\n self.assertRaises(HTTPError, acme.all, self.org_id)\n\n # Verify all the query information\n self.assertEqual(len(responses.calls), 1)\n self.match_url_with_qs(responses.calls[0].request.url)", "def test_register_http_failure(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps({\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n })\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"[email protected]\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()", "def test_fetch_user(self):\n\n self.register_user()\n\n self.assertEqual(self.fetch_user_details().status_code, 200)\n\n 
self.assertTrue(self.fetch_user_details(\n ).json[\"data\"][0][\"username\"] == 'Bjorn')", "def _handle_aprs_error(self, failure):\n\n # Log the error\n logging.error(\"An error occured in the '\"+self._service_id+\"' service while querying the APRS.fi API: \"+\n failure.getErrorMessage())\n\n return None", "def test_get_review_detail_fail(self):\n client = Client()\n response = client.get('/api/review/1/')\n self.assertEqual(response.status_code, 401)\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.get('/api/review/7/')\n self.assertEqual(response.status_code, 404)", "def _raise_performing_request_error(self, *args, **kwargs):", "def test_user_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.user_header)\n self.assertEqual(response.status_code, 401)", "def test_request_users_user_invalid_resource(self):\n response = requests.get(self.url + '/users/John/invalid')\n\n self.assertEqual(response.status_code, 404)", "def test_lti20_request_handler_bad_user(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n self.system.get_real_user = Mock(return_value=None)\r\n mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n self.assertEqual(response.status_code, 404)", "def raise_on_error(self):\n if not self._status.success:\n cls = UrlApi.InfraHTTPError if self._infra_step else UrlApi.HTTPError\n raise cls('HTTP status (%d)' % (self.status_code,), self)", "def auth_error():\n return unauthorized('Invalid credentials')", "def test_ApiWillAuthenticate_InvalidCredentials_Unsuccessfully(self):\n api = Api(self.userId, \"\")\n self.assertFalse(api.connected())", "def test_getting_inactive_user(self):\n url = self.get_url(self.inactive_user.id)\n with authenticated_user_api_client(self.client, self.active_user):\n expected_response_code = 404\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, expected_response_code)", "def resolve_failure(self):\n\t\tpass", "def _check_for_api_errors(geocoding_results):\n status_result = geocoding_results.get(\"STATUS\", {})\n api_call_success = status_result.get(\"status\", \"\") == \"SUCCESS\"\n if not api_call_success:\n access_error = status_result.get(\"access\")\n access_error_to_exception = {\n 'API_KEY_INVALID': GeocoderAuthenticationFailure,\n 'OVER_QUERY_LIMIT': GeocoderQuotaExceeded,\n }\n exception_cls = access_error_to_exception.get(\n access_error, GeocoderServiceError\n )\n raise exception_cls(access_error)", "def test_unauthenticated_request(self):\n url = self.get_url(self.active_user.id)\n response = self.client.get(url)\n\n expected_status_code = 401\n self.assertEqual(response.status_code, expected_status_code)", "def test_validate_when_user_not_found(self, view, mget_user):\n mget_user.side_effect = NoResultFound()\n\n with raises(HTTPUnauthorized):\n view.validate()", "def test_user_info_without_header(self, app, auth_user):\n data = UserInfo.random()\n us_info = app.user_info.add_user_info(data=data, user_id=auth_user.uuid,\n header=None, type_response=AuthInvalidResponse)\n assert us_info.status_code == 401, \"Check status code\"\n assert us_info.data.description == ResponseText.DESCRIPTION_AUTH_ERROR\n assert us_info.data.error == ResponseText.ERROR_AUTH_TEXT\n assert us_info.data.status_code == 401, \"Check status code\"", "def test_nonexistent_user(self):\n self.client.login(username=self.global_staff.username, 
password=self.password)\n resp = self.client.get(self.get_url('IDoNotExist'))\n assert resp.status_code == status.HTTP_404_NOT_FOUND", "def raise_for_status(self):\n if self._response.status_code == 200:\n pass\n else:\n raise ApiError(\n f\"Status Code: {self._response.status_code}, \\\n Error: {getattr(self, 'error', 'No Data')}, \\\n Message: {getattr(self, 'message', 'No Data')}\")", "def test_retrieve_author_unlogged(self):\n request = self.client.get(self.epoint)\n\n self.assertEqual(request.status_code, status.HTTP_200_OK)", "def _raise_http_error(self, *args, **kwargs):", "def test_004_get_user_not_found(self, mock_db_query):\n mock_db_query.get.return_value = None\n\n response = self.app.get('/v1/users/0', headers={'accept': 'application/json'})\n\n print(response.get_data().decode())\n\n self.assertEqual(response.status_code, 404)\n self.assertIn('User not found', response.get_data().decode())", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def test_request_users_user(self):\n response = requests.get(self.url + '/users/John')\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNone(response.json())", "def test_sees_error_message_if_username_doesnt_exist(self):\n response = self.app.post(\n \"/api/users/login\",\n data=json.dumps(\n dict(\n email=USER_DATA[\"email\"] + \"x\",\n password=USER_DATA[\"credential1\"],\n )\n ),\n content_type=\"application/json\",\n follow_redirects=True,\n )\n res = response.data.decode(\"ASCII\")\n res = json.loads(res)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(\n res[\"message\"], \"Invalid email, Please try again\"\n )", "def test_invalid_credentials_forbidden(self):\n response = self._mock_utility(get_kwargs=self._data(),\n error=fitbit_exceptions.HTTPForbidden)\n self._check_response(response, 103)\n self.assertEqual(UserFitbit.objects.count(), 0)", "async def test_good_retrieve_user_data(self, m):\n m.get(\"http://test.csc.fi/userinfo\", payload={\"ga4gh_passport_v1\": [{}]})\n user_data = await retrieve_user_data(\"good_token\")\n self.assertEqual(user_data, [{}])", "def test_nonexistent_user(self):\n nonexistent_username = \"nonexistent user\"\n self.retired_username = get_retired_username_by_username(nonexistent_username)\n data = {'username': nonexistent_username}\n headers = self.build_jwt_headers(self.superuser)\n response = self.client.post(self.url, data, **headers)\n self.assert_response_correct(response, 404, None)", "def test_get_non_existing_token_authenticated_user(self):\r\n\r\n user_no_tokens = UserFactory.create_batch(2)[1]\r\n\r\n res = self.app.get('/api/token/twitter?api_key=' + user_no_tokens.api_key)\r\n error = json.loads(res.data)\r\n\r\n assert res.status_code == 404, error\r\n assert error['status'] == 'failed', error\r\n assert error['action'] == 'GET', error\r\n assert error['target'] == 'token', error\r\n assert error['exception_cls'] == 'NotFound', error", "def test_user_info_no_violations(self):\n fake_fetchall = MagicMock()\n fake_fetchall.return_value = []\n db = database.Database()\n db._cursor.fetchall = fake_fetchall\n\n exceeded_quota_epoch = db.user_info('sally')\n expected = (0, 0)\n\n self.assertEqual(exceeded_quota_epoch, expected)", "def validate_connection(self):\n __method_name = inspect.currentframe().f_code.co_name\n res = self.pull(\n url=self.base_url + consts.OAUTH2_ENDPOINT,\n auth=HTTPBasicAuth(self.client_id, self.client_secretkey),\n data={\"grant_type\": \"client_credentials\"},\n 
method=\"POST\",\n )\n if res and res.get(\"access_token\"):\n self.session.headers[\"Authorization\"] = \"bearer {}\".format(\n res.get(\"access_token\")\n )\n self.applogger.info(\n \"{}(method={}) : {} : Validation successful.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n return\n self.applogger.error(\n \"{}(method={}) : {} : Error occurred while fetching the access token from the response. \"\n 'Key \"access_token\" was not found in the API response.'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n raise Exception(\n \"Error occurred while fetching the access token from the response. \"\n 'Key \"access_token\" was not found in the API response.'\n )", "def test_ApiConnectionWillAuthenticate_InvalidCredentials_Unsuccessfully(self):\n connection = ApiConnection(self.userId, \"\")\n self.assertFalse(connection.connected())", "def test_downstream_forums_error(self):\n self.register_get_user_retire_response(self.user, status=500, body=\"Server error\")\n headers = self.build_jwt_headers(self.superuser)\n data = {'username': self.user.username}\n response = self.client.post(self.url, data, **headers)\n self.assert_response_correct(response, 500, '\"Server error\"')", "def test_call_httperror(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(500)\n with self.assertRaises(APIError):\n data = client.call(**self.build_parameters)", "def test_retrive_user(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data['email'], self.user.email)\n self.assertEqual(res.data['name'], self.user.name)\n self.assertNotIn('password', res.data)", "def test_unauthorized_user(self):\n response_decoded_json = requests.post(URL_AUTH['url_login'], \n data=json.dumps(AUTH_PAYLOADS['payload_unauth']),\n headers=HEADER['header'])\n mes = response_decoded_json.json()\n assert 400 == response_decoded_json.status_code, \"You have BAD REQUEST\"\n assert \"User not found\" == mes, \"There is unexpected ability to login as unknown user\"", "def test_get_user_non_exist_id(self):\n print('(' + self.test_get_user_non_exist_id.__name__+')',\n self.test_get_user_non_exist_id.__doc__)\n self.assertIsNone(self.connection.get_user(NON_EXIST_PATIENT_USERNAME))", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_user_information_request(self):\n pass", "def testGetUserWithoutData(self):\n self.store.commit()\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n deferred = self.facade.getUser(session, u'unknown')\n error = yield self.assertFailure(deferred, TNoSuchUser)\n self.assertEqual(u'unknown', error.name)", "def test_get_fail(self):\n response = self.second_client.get(self.url)\n self.assertEquals(response.status_code, 400)", "def test_not_authenticated(self):\n self.client.logout()\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 101)\n self.assertEqual(UserFitbit.objects.count(), 1)", "def test_health_facilities_endpoint_with_bad_endpoint(self):\n response = self.client.get(\"search/healthfacilities?q=Kitale\")\n 
self.assertIn(b'\"status\": \"FAILED\"', response.data)", "def test_anonymous_cannot_get_userprofileview(dclient):\n resp = dclient.get(\"/api/record/profile/\", follow=True)\n assert resp.status_code == 403", "def test_get_non_existent_issue_fails(self):\n response = self.client.get(self.non_existent_url)\n response_json = response.get_json()\n error_details = response_json[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], ISSUE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], ISSUE_NOT_FOUND_SUB_CODE)", "def test_obtain_issues_http_error(self, mock_url_read):\n mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)", "def on_failure(self, exc: BaseException) -> None:", "def test_api_call_without_token(self):\n res = self.client().get('/actors')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 401)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Authentication error.\")", "def test_get_user_id_unknown_user(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n self.assertIsNone(self.connection.get_user_id(\n NON_EXIST_PATIENT_USERNAME))", "def test_real_user_is_none(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n self.xmodule.has_score = True\n self.system.get_real_user = Mock(return_value=None)\n request = Request(self.environ)\n request.body = self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'User not found.',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def test_results_failed(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser_id\n\n address = \"1215 Brookview Ave, Kettering, Ohio 45409\"\n\n resp = c.get(f\"/results/{address}\")\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\n '<h1 class=\"mt-3 display-2 text-center\"><b>Maybe try living somewhere closer to civilization, testuser</b></h1>',\n html,\n )\n self.assertIn(\n '<i class=\"fas fa-sad-cry fa-8x amber-text\"></i>', html,\n )", "def test_login_fail_no_users_in_db(self):\n serializer = G(LoginSerializer)\n response = self.client.post(self.login_url, serializer.data, format='json')\n self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_endpoint_access_fail(self):\n url = reverse('users:activate-from-email', args=(1, 1))\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)", "def error(request):\r\n #Retrieve information which caused an error\r\n messages = get_messages(request)\r\n info =''\r\n try:\r\n user = _validate_and_get_geniuser(request)\r\n return profile(request, info, info, messages)\r\n except:\r\n return _show_login(request, 'accounts/login.html', {'messages' : messages})", "def test_user_not_found(self):\n data = {'email': 'unmatched email'}\n response = self.client.post(self.url, data=data)\n\n expected_response_code = 200\n\n self.assertEqual(expected_response_code, response.status_code)\n 
self.assertFalse(response.data.get('exists'))", "def test_expense_summary_loads_properly(self):\n response = self.client.get('your_server_ip:8000/auth/login/expense/expense_summary')\n self.assertEqual(response.status_code, 404)", "def test_404_is_returned_when_device_not_found(self):\n data = {\n 'sip_user_id': '987654321',\n }\n response = self.client.post(self.check_in_url, data)\n\n self.assertEquals(response.status_code, 404)\n self.device.refresh_from_db()\n self.assertEquals(self.device.last_seen, self.last_seen_date)", "def _check_errors(self, json_loaded):\n\n content = json_loaded\n try:\n m = content[u'error'][u'message']\n c = content[u'error'][u'code']\n out= \"API Error code: {}\\nError message: {}\".format(c, m)\n raise InvalidQueryException(self.name, out)\n except KeyError:\n pass", "def handle_failure_request(self) -> HttpResponse:\n return HttpResponseNotFound()", "def test_real_user_is_none(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n self.xmodule.has_score = True\r\n self.system.get_real_user = Mock(return_value=None)\r\n request = Request(self.environ)\r\n request.body = self.get_request_body()\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'User not found.',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def test_social_auth_exception(self):\n self._setup_provider_response_with_body(200, json.dumps(\"false\"))\n response = self.client.post(self.url, self.data())\n self._assert_access_token_error(response, \"The provided access_token is not valid.\", \"tpa-invalid-access-token\")\n self._verify_user_existence(user_exists=False, social_link_exists=False)" ]
[ "0.6441003", "0.60609096", "0.6001092", "0.5984341", "0.5984341", "0.59653944", "0.5955787", "0.5928276", "0.59214807", "0.59023386", "0.5876574", "0.5876278", "0.58514905", "0.58513963", "0.5837443", "0.583195", "0.5824492", "0.5800869", "0.57902473", "0.577529", "0.5762384", "0.5741957", "0.5711855", "0.5703913", "0.5701664", "0.56940955", "0.5677905", "0.56610453", "0.56594723", "0.5656055", "0.5654844", "0.56456304", "0.563285", "0.56320673", "0.5624577", "0.5622054", "0.5619268", "0.5617144", "0.561038", "0.560653", "0.5598003", "0.55604696", "0.5556809", "0.5556064", "0.552522", "0.5512177", "0.55097514", "0.5509125", "0.54990953", "0.54869586", "0.5484616", "0.5482824", "0.5477907", "0.5477208", "0.54722804", "0.5469548", "0.546795", "0.5464893", "0.5462091", "0.5459231", "0.54519725", "0.5449329", "0.5441279", "0.5438765", "0.5431041", "0.5424107", "0.54032224", "0.5398466", "0.53897625", "0.5374403", "0.5373601", "0.53735095", "0.53655666", "0.53612626", "0.53612626", "0.53612626", "0.53612626", "0.5359674", "0.5359635", "0.5359316", "0.5359106", "0.53588384", "0.5354239", "0.5353785", "0.53524375", "0.5350014", "0.5339402", "0.53383225", "0.53359884", "0.53357816", "0.5329295", "0.53232026", "0.5318672", "0.53173923", "0.53096586", "0.5308008", "0.5305445", "0.5303173", "0.5301846", "0.5301224" ]
0.58489734
14
Method handling all item-specific processing. Returns a dictionary containing the scraped report.
def process_item(self, item, spider): # Memory has extra postfix letters and they need to be remove # and then converted into actual integer numeric = RE_MATCH.match(item['Memory']).group(0) item['Memory'] = int(numeric) # The same case as above but here the value is a float numeric = RE_MATCH.match(item['Base Frequency']).group(0) item['Base Frequency'] = float(numeric) """ Some folks identify MB as number making it 'int' in the spider causing Pandas to get crazy so the value is explicity marked as 'str'. Also sometimes motherboard information is missing but as it is not necessarily making the data obsolete, the result is still stored. """ item['Motherboard'] = str(item['Motherboard']) if 'Motherboard' in item else '' # In order to keep potential string processing simples, they are all # converted into lowercase strings. for key in item: if type(item[key]) is str: item[key] = item[key].lower() return item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processItem(self):\r\n self.extract()\r\n self.mergeLanguageClaims()\r\n self.validateClaims()\r\n self.mergeWithWikidata()\r\n self.writeToWikidata()\r\n self.log()", "def _parse_item(self, item):\n result = {}\n for f in self._invoice_report_item_fields:\n val = get_value_by_relation_path(item, f)\n # when it's function - call it! usefull for Choices\n # (get_<field_name>_display)\n if callable(val):\n val = val()\n elif isinstance(val, datetime.datetime):\n val = val.strftime(self._invoice_report_datetime_format)\n elif isinstance(val, Money):\n val_currency = '{}_currency'.format(self._price_field)\n result[val_currency] = str(val.currency) \\\n if val.currency else self._invoice_report_empty_value\n val = val.amount\n result[f] = str(val) if val else self._invoice_report_empty_value\n\n return result", "def process_item(self, item, spider):\n writer = csv.writer(self.file, delimiter = '|')\n for apartment in item[\"apartments\"]:\n row = [apartment[\"price\"], apartment[\"size\"], apartment[\"rooms\"], apartment[\"address\"], apartment[\"lat\"],\n apartment[\"lng\"], apartment[\"zone\"], apartment[\"band\"], apartment[\"east\"], apartment[\"north\"],\n apartment[\"date\"]]\n writer.writerow(row)\n self.file.flush()\n print(\"page {} processed.\".format(item[\"page\"]))\n return item", "def _get_report_data(self, request, queryset):\n first_item = queryset[0]\n data = {\n 'id': str(slugify(first_item.invoice_no)),\n 'property_of_id': (\n first_item.property_of.id\n if first_item.property_of else None\n ),\n 'model': queryset.model._meta.model_name,\n 'base_info': {\n 'invoice_no': first_item.invoice_no,\n 'invoice_date': first_item.invoice_date,\n 'provider': first_item.provider,\n 'datetime': datetime.datetime.now().strftime(\n self._invoice_report_datetime_format\n ),\n },\n 'items': list(map(self._parse_item, queryset)),\n 'sum_price': str(\n queryset.aggregate(\n Sum(self._price_field)\n ).get('{}__sum'.format(self._price_field))\n )\n }\n logger.info('Invoice report data: {}'.format(data))\n return data", "def process_item(self, item, spider):\n item['url'] = spider.config['site_domain'] + item[\"url\"]\n item[\"rating\"] = extract_rating(item[\"rating\"])\n item['price'] = get_price(item['price_integer'], item[\"price_decimal\"])\n item['no_discount_price'] = get_price(item['no_discount_price_integer'], item[\"no_discount_price_decimal\"])\n item[\"brand\"] = get_brand(item[\"brand\"])\n item[\"number_of_ratings\"] = get_number_of_ratings(item[\"number_of_ratings\"])\n del item['price_integer']\n del item['price_decimal']\n del item['no_discount_price_integer']\n del item[\"no_discount_price_decimal\"]\n return item", "def parse_main(self, response):\n\n for i in response.xpath('//div[contains(@class,\"products-list__item\")]'):\n item = {\n \"VENDORID\": 1055,\n \"VENDOR\": 'JC SALES',\n \"ITEMNO\": i.xpath('.//span[contains(text(),\"Item No:\")]/text()').get().replace('Item No:', '').strip(),\n \"DESCRIPTION\": i.xpath('.//div[contains(@class,\"product-card__name\")]//a/text()').get(),\n \"IMAGE_URL\": i.xpath('.//div[contains(@class,\"product-card__image\")]//img[1]/@src').get(),\n \"PAGE_TITLE\": response.css('title::text').get(),\n \"PAGE_URL\": response.request.url\n }\n yield Request(response.urljoin(i.xpath('.//a[contains(@class,\"image__body\")]/@href').get()),\n self.parse_details, meta={'item': item})\n\n next_page = response.xpath('//a[text()=\">\"]/@href').get()\n if next_page is not None:\n next_page = response.urljoin(next_page)\n yield 
scrapy.Request(next_page, callback=self.parse_main)", "def parse_items(self,response):\n sel = Selector(response)\n item = response.meta['job_item']\n company_item = response.meta['company_item']\n company_item['introduction'] = sel.xpath('//div[@class=\"job-item main-message noborder\"]/div[@class=\"content content-word\"]/text()').extract_first()\n company_item['address'] = sel.xpath('//div[@class=\"company-infor\"]/p/text()').extract_first()\n item['link'] = response.url\n item['requirement'] = sel.xpath('//div[@class=\"content content-word\"][1]/text()').extract_first()\n item['website_id'] = 7\n item['company'] = company_item\n print item\n yield item", "def parse_details(self, response):\n items = response.xpath(\"//*[@id='all']//div[@class='prdct-box']\")\n for i in items:\n image_url = response.urljoin(i.xpath(\".//div[@class='prdct-box1']/a[1]/@href\").get())\n description = i.xpath(\".//div[@class='prdct-box2']//a[1]/text()\").get()\n item_no = i.xpath(\".//div[@class='prdct-box2']//text()[3]\").get(default='').strip()\n upc = i.xpath(\".//*[contains(text(),'UPC')]/following-sibling::text()[1]\").extract()[0].strip()\n category = i.xpath(\"//*[@id='all']//*[@class='products']/text()\").get()\n case = i.xpath(\".//*[contains(text(),'Case')]/following-sibling::text()[1]\").extract()[0]\n yield {\n \"VENDORID\":1068,\n \"VENDOR\":'UPD',\n \"ITEMNO\":item_no,\n \"UPC\":upc,\n \"CATEGORY\":category,\n \"DESCRIPTION\":description,\n \"IMAGE_URL\":image_url,\n \"CASEPACK\":case,\n \"PAGE_TITLE\":response.css('title::text').get(),\n \"PAGE_URL\":response.request.url\n }\n\n next_page = response.xpath(\"//p[@class='page-num']//a/@href\").extract()\n if next_page is not None:\n for n in next_page:\n next_page_url = response.urljoin(n)\n yield scrapy.Request(next_page_url, callback=self.parse_details)", "def process_item(self, _item: dict):\n _item['coordinates'] = self.process_coordinates(\n _item['coordinates']\n )\n _item['countryName'] = self.process_country_name(\n _item['countryName']\n )\n _item['portName'] = self.process_port_name(\n _item['portName']\n )\n _item['unlocode'] = self.process_unlocode(\n _item['unlocode']\n )\n return _item", "def _crawler_result(item, response, spider):\n output_data.clear()\n output_data.append(dict(item))", "def process_item(self, item, spider):\n # remove SQL support\n # use csv to store data\n #check whether table already exsit in pd_dict\n if item[\"table\"] not in self.pd_dict:\n #check whether csv with table name exit\n file = basePath +'/'+ item[\"table\"]+'.csv'\n if os.path.exists(file):\n df = pd.read_csv(file)\n self.pd_dict.update({item[\"table\"]: df})\n else:\n df = pd.DataFrame(columns = ['animatetitle', 'othertitle', 'cross_s','nums', 'last_title'])\n self.pd_dict.update({item[\"table\"]: df})\n\n if item['animatetitle'] not in self.pd_dict[item[\"table\"]]['animatetitle'].values:\n self.pd_dict[item[\"table\"]] = self.pd_dict[item[\"table\"]].append(\n {'animatetitle' : item['animatetitle'], 'othertitle' : item['othertitle'], 'cross_s' : item['cross'],'nums':item['nums'], 'last_title':item['last_title']}, \n ignore_index = True)\n\n return item", "def getReport(request):\n\n\t#parameters needed for different REST API's\n\tparams = {\n\t\t'rid':-1,\n\t\t'year':-1,\n\t\t'con_num':-1,\n\t\t'assign_num':-1,\n\t\t'item_num':-1,\n\t\t'wtype': -1,\n\t\t'payno': -1,\n\t\t'snap': 0, #default is 0 for snapshots (for now)\n\t\t'issue_date': -1,\n\t}\n\n\t#loop over the parameters and set them if they appear in the api url\n\tfor p in 
params:\n\t\tif p in request.GET:\n\t\t\tparams[p] = request.GET[p]\n\n\n\t#get the request session and load data\n\ts = requests.Session()\n\tif not isinstance(rgen.ReportGenerator.get_url(params), dict):\n\t\tresponse = s.get(rgen.ReportGenerator.get_url(params))\n\n\t\t#set the iterator and the content\n\t\tit = json.loads(response.content)\n\t\tcontent = json.loads(response.content)\n\t\t\n\t\t#while a next page exists, parse the api\n\t\tpageNum = 1\n\t\twhile \"next\" in it:\n\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params) + '?page=' + str(pageNum))\n\t\t\tit = json.loads(response.content)\n\t\t\tcontent[\"items\"].extend(it[\"items\"])\n\t\t\tpageNum += 1\n\n\telse:\n\t\t#if the url is a list\n\t\tcontent = {}\n\t\tfor part in rgen.ReportGenerator.get_url(params):\n\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params)[part])\n\t\t\tit = json.loads(response.content)\n\t\t\t#content = {\"part1\":{\"items\":[]}, \"part2\":{\"items\":[]}, \"part3\":{\"items\":[]}}\n\t\t\t\n\t\t\tcontent[part] = {}\n\t\t\tcontent[part][\"items\"] = []\n\t\t\tcontent[part][\"items\"].extend(it[\"items\"])\n\n\t\t\tpageNum = 1\n\t\t\twhile \"next\" in it:\n\t\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params)[part] + '?page=' + str(pageNum))\n\t\t\t\tit = json.loads(response.content)\n\t\t\t\tcontent[part][\"items\"].extend(it[\"items\"])\n\t\t\t\tpageNum += 1\n\t\n\t#set the file object to be returned as a download\n\tfile = HttpResponse(rgen.ReportGenerator.formExcel(content, params), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n\tif params[\"rid\"] == '70':\n\t\tfile['Content-Disposition'] = 'attachment; filename=' + rgen.r_dict[params[\"rid\"]][1] + ' No.' + params['issue_date'] + '.xlsx'\n\telse:\n\t\tfile['Content-Disposition'] = 'attachment; filename=' + rgen.r_dict[params[\"rid\"]][1] + '.xlsx'\n\ts.close()\n\treturn file", "def process_items():\n global HAS_WATCH\n global HAS_FIRST_AID_KIT\n global HAS_FLASHLIGHT\n global HAS_RAINCOAT\n global HAS_COMPASS\n global HAS_BEARTRAP\n\n if \"Watch\" in ITEMS:\n HAS_WATCH = True\n if \"First Aid Kit\" in ITEMS:\n HAS_FIRST_AID_KIT = True\n if \"Flashlight\" in ITEMS:\n HAS_FLASHLIGHT = True\n if \"Raincoat\" in ITEMS:\n HAS_RAINCOAT = True\n if \"Compass\" in ITEMS:\n HAS_COMPASS = True\n if \"Bear Trap\" in ITEMS:\n HAS_BEARTRAP = True\n\n # Stupid little hack to provide 'immediate updates/effect' of having the below items\n if HAS_WATCH:\n update_title_area(\" Day: %d Time: %d:00 \" % (DAY, TIME))\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS] = \"Y\"", "def process_item(self, item, spider):\n tmp_dict = {}\n tmp_dict['comments'] = item['comments']\n tmp_dict['referenceName'] = item['referenceName']\n tmp_dict['referenceTime'] = item['referenceTime']\n tmp_dict['productColor'] = item['productColor']\n tmp_dict['productSize'] = item['productSize']\n self.savefile.write(u\"{0}\\n\".format(json.dumps(tmp_dict)))\n #raise DropItem()", "def _get_template_data(self):\n self._set_meta_info()\n if self._report_key == ReportTypes.SEARCH_TOC_REPORT:\n self._set_selected()\n elif self._report_key == ReportTypes.MHR_COVER:\n self._report_data['cover'] = report_utils.set_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n elif self._report_key == ReportTypes.MHR_REGISTRATION_COVER:\n self._report_data['regCover'] = report_utils.set_registration_cover(self._report_data)\n self._report_data['createDateTime'] = 
Report._to_report_datetime(self._report_data['createDateTime'])\n if str(self._report_data.get('registrationType', '')).startswith('TRAN'):\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n else:\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_search_additional_message()\n elif self._report_key == ReportTypes.MHR_TRANSFER:\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n self._set_date_times()\n self._set_addresses()\n self._set_owner_groups()\n if self._report_key not in (ReportTypes.MHR_REGISTRATION,\n ReportTypes.MHR_TRANSFER,\n ReportTypes.MHR_TRANSPORT_PERMIT):\n self._set_notes()\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_selected()\n self._set_ppr_search()\n elif self._report_key == ReportTypes.SEARCH_BODY_REPORT:\n # Add PPR search template setup here:\n self._set_ppr_search()\n if self._report_key not in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_NOTE):\n self._set_location()\n if self._report_key != ReportTypes.MHR_TRANSPORT_PERMIT:\n self._set_description()\n return self._report_data", "def get_response(self, list_item):\n return {\n 'title': self.get_title(list_item),\n 'link': self.get_link(list_item),\n 'address': self.get_address(list_item),\n 'phone': self.get_phone(list_item),\n 'score': self.get_score(list_item),\n }", "def processReports(self):\n count = 0\n for r in self.reports:\n #need to change the next two lines so that the fields are not hard-coded\n self.currentCase = r.id\n self.currentText = r.impression.lower()\n self.analyzeReport(self.currentText,\n \"disease\",\n modFilters=['indication','probable_existence',\n 'definite_existence',\n 'historical','future','pseudoneg',\n 'definite_negated_existence',\n 'probable_negated_existence'])\n\n self.recordResults()", "def parse_details(self, response, item=None):\n \n assert item is not None, \"Provide an item\"\n \n if response:\n # Use individual WARN notice url\n item['url'] = response.url\n\n fields = item['fields']\n \n dt = get_text_of_matching_elements(response, '//dt')\n dd = get_text_of_matching_elements(response, '//dd')\n\n data = dict(zip(dt, dd))\n \n # Update fields with additional data\n fields.update(data)\n item['fields'] = fields\n\n # Generate normalized fields\n norm_fields = get_normalized_fields(self.fields_dict, pd.Series(fields)).to_dict()\n item['normalized_fields'] = norm_fields \n\n yield item", "def parse_detail_page(self, response):\n self.logger.info('Parse Detail Page function called on %s', response.url)\n item = response.meta.get('item', {})\n item['url'] = response.url\n item['title'] = response.css(TITLE_SELECTOR).extract_first(\"\").strip()\n item['price'] = self.get_price(response)\n return item", "def parse_item_page_info(self, id, body):\n info = {}\n info['title'] = self.__re_search(body, *self.regx['title'])\n if info['title'] == 'Suggested Products':\n return None\n info['model'] = self.__re_search(body, *self.regx['model'])\n if self.__re_search(body, *self.regx['deactivated']):\n 
info['deactivated'] = True\n return info\n free_shipping = self.__re_search(body, *self.regx['free_shipping'])\n cart = self.__re_search(body, *self.regx['cart'])\n if free_shipping and not cart:\n info.update(self.parse_item_page_price(id, body))\n return info", "def parse_item(self, response):\n item = IphoneSpiderItem()\n\n item['sku'] = response.meta.get('sku')\n item['price'] = response.meta.get('price')\n item['name'] = response.meta.get('name')\n item['seller'] = response.meta.get('seller')\n #pass the data from parse to parse_item\n\n url = response.url\n model = response.xpath('//*[@id=\"crumb-wrap\"]/div/div[1]/div[9]/text()').extract_first()\n color = response.xpath('//div[@data-type=\"颜色\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/i/text()').extract_first()\n memory = response.xpath('//div[@data-type=\"版本\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n memory2 = response.xpath('//div[@data-type=\"内存\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n #memory data can be stored in 版本 or 内存\n\n if memory2:\n memory = memory2.strip()\n elif memory:\n memory = memory.strip()\n\n item['model'] = model\n item['color'] = color\n item['memory'] = memory\n item['url'] = url\n\n return item", "def parse_items(self):", "def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:", "def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:", "def parse_detail(self, response):\n\n self.logger.log(self.log_lvl, 'scraping data @ {}'.format(response.url))\n\n item_list = list()\n image_urls = list()\n # extract image\n try:\n pattern = re.compile(r\"(.*imagearray:)(.*)(,.*displaymode.*)\", re.MULTILINE | re.DOTALL)\n javascript_containing_images = response.xpath('//script[contains(., \"var mygallery=\")]/text()').extract()[0]\n images = re.match(pattern, javascript_containing_images).group(2)\n image_array = json.loads(images)\n image_urls = [urlparse.urljoin(response.url, itm[1]) for itm in image_array]\n except Exception as e:\n print(\"{} - {}\".format(type(e), str(e)))\n\n tipe_mobil = response.css('#content font.vehicleinfo ~ font.warning::text').extract_first()\n model_mobil = response.css('#content font.vehicleinfo::text').extract_first()\n if tipe_mobil.lower() == model_mobil.lower():\n tipe_mobil = response.meta.get('type', None)\n main_group = response.meta.get('main_group', None)\n assembly_set = response.css('#content font.title b::text').extract_first()\n\n # sparepart items\n for row in response.css('div#content div.content table tr'):\n item = IsuzuSparepartItem()\n\n # source_url\n item['source_url'] = response.url\n\n # car model\n item['merk'] = self.name\n item['tipe_mobil'] = tipe_mobil\n item['model_mobil'] = model_mobil\n\n # images\n item['image_urls'] = image_urls\n\n # grouping/assembly\n item['main_group'] = main_group\n item['assembly_set'] = assembly_set\n\n item['key'] = row.css('td.intable:nth-child(1) .detailcontent::text').extract_first()\n item['part_number'] = row.css('td.intable:nth-child(2) .detailcontent::text').extract_first()\n item['itc'] = row.css('td.intable:nth-child(3) .detailcontent::text').extract_first()\n item['description'] = row.css('td.intable:nth-child(4) .detailcontent::text').extract_first()\n item['qty'] = row.css('td.intable:nth-child(5) .detailcontent::text').extract_first()\n item['app_date'] = row.css('td.intable:nth-child(6) .detailcontent::text').extract_first()\n item['lr'] = row.css('td.intable:nth-child(7) 
.detailcontent::text').extract_first()\n item['model'] = row.css('td.intable:nth-child(8) .detailcontent::text').extract_first()\n item['remarks'] = row.css('td.intable:nth-child(9) .detailcontent::text').extract_first()\n\n item_list.append(item)\n\n return item_list", "def _item_to_elements_parser(self, item):\n elements = {}\n\n ####### Sad solution - look for better one. #######\n items = [\"data\", \"img\", \"title\", \"link\", \"price\"]\n values = (\"item.p.string.strip()\", 'item.img[\"src\"]', 'item.img[\"alt\"]',\n '''item.find(\"a\", {\"class\":\"detailsLink\"})['href']''',\n '''item.find('strong').string.strip()''')\n for key, value in zip(items, values):\n\n # CONVERT TIME\n # if key == \"data\":\n # try:\n # print (time.strptime(eval(value), \"%d %b\"))\n # except Exception as error:\n # print (error) # time data '5 paz' does not match format '%d %b'\n\n try:\n elements.update({key:eval(value)})\n except (TypeError, AttributeError):\n elements.update({key:None})\n\n\n # print()\n # for key, val in elements.items():\n # print (key, val)\n # print()\n ###################################################\n return elements", "def compute_response(self, items_to_process):\n pass", "def parse_item(self, response):\n NewhouseSpider.crawled_urls.append(response.url)\n item = FocusedScrapyCrawlerItem()\n item['url'] = response.url\n item['link_text'] = response.meta.get('link_text', '') if response.meta else ''\n soup = BeautifulSoup(response.body, 'html.parser')\n\n item['body_p_tags'] = self._getBodyText(soup)\n item['head_title'] = self._getHeadTitle(soup)\n item['last_crawled'] = time.time()\n links = self._getLinks(response, soup)\n\n # get score of the page based upon classifier\n if self.classifier:\n score = self.classifier.score(item['link_text'], item['head_title'], item['body_p_tags'])\n else:\n score = 0.0\n\n item['score'] = score\n yield item\n if score <= 0:\n self.log(\"item={} does not belong to new home so stop crawling\".format(item),\n logging.INFO)\n else:\n for link in links:\n req = Request(link, priority=int(score * 1000000), # after the request is done, run parse_item to train the apprentice\n callback=self.parse_item)\n yield req", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def parse(self, response):\n self.driver.get(response.url)\n product_category=response.meta[\"category_text\"]\n products=response.xpath(\"//*[(@class='list-item')]\")\n \n # item containers for storing product\n items = CrawlingECommerceItem()\n \n # iterating over search results\n # for product in products:\n # # Defining the XPaths\n # XPATH_PRODUCT_LINK=\".//*[contains(concat( ' ', @class, ' ' ), concat( ' ', 'goods-tit', ' ' ))]//a\"\n # XPATH_PRODUCT_NAME=\".//div[@class='goods-introudce']//a/@href\"\n # XPATH_PRODUCT_PRICE=\".//div[@class='catalog-detail']//div[@class='detail-right']//p/text()\"\n # XPATH_PRODUCT_IMAGE_LINK=\".//img\"\n\n # raw_product_name=product.xpath(XPATH_PRODUCT_NAME).get()\n # raw_product_price=product.xpath(XPATH_PRODUCT_PRICE).get()\n # raw_product_image_link=product.xpath(XPATH_PRODUCT_IMAGE_LINK).extract()\n # raw_product_link=product.xpath(XPATH_PRODUCT_LINK).get()\n\n # # cleaning the data\n # product_name=''.join(raw_product_name).strip(\n # ) if raw_product_name else None\n # product_price=''.join(raw_product_price).strip(\n # ) if raw_product_price else None\n # product_image_link=''.join(raw_product_image_link).strip(\n # ) if raw_product_image_link else None\n # product_link=''.join(raw_product_link).strip(\n # ) if 
raw_product_link else None\n\n # # storing item\n # yield CrawlingECommerceItem (\n # product_name=product_name,\n # product_price=product_price,\n # product_url=product_link,\n # product_category=product_category,\n # image_urls=raw_product_image_link\n # )\n\n # # yield items\n \n # XPATH_PRAGINATION_LINK=\"//*[(@class='next right')]/a/@href\"\n\n yield response.follow(str(response.request.url), callback = self.parse, meta = {\"category_text\": product_category})", "def parse_webpage(self, response):\n item = response.meta['item']\n print(\"Request url {}, actual requested url {}\".format(item['url'], response.request.url))\n # website url\n item['website_url'] = response.request.url\n\n item['name'] = self.guess_company_name(response)\n item['domain'] = self.get_domain(response)\n\n # get website title\n item['website_title'] = self.get_webpage_title(response)\n # get description from website\n item['website_desc'] = self.get_webpage_description(response)\n\n # get keywords from website\n item['keywords'] = self.get_webpage_keywords(response)\n\n # try to get email and phones\n item['email'] = self.extract_email(response)\n item['phone'] = self.extract_phone(response)\n\n if not item['email']:\n # try to get contact info\n # check if there is kontakt link on the page\n item = self.check_webpage_for_contact_details(item, response, \"impressum\")\n\n if not item['email']:\n try:\n # try Contact\n item = self.check_webpage_for_contact_details(item, response, \"kontakt\")\n\n except Exception as e:\n print(\"Exception\", e)\n\n if item['email']:\n item['email'] = item['email'].replace(\"(at)\", \"@\")\n yield item", "def get_final_result(self, spider):\n\n # stop crawling after yeild_item called\n if not self.result_received:\n # push to webhook\n if self.screenshots_ids:\n self.result['__screenshots_ids__'] = self.screenshots_ids\n self.data = {\n 'scrape_id': self.scrape_id,\n 'scraper_name': self.name,\n 'files_count': self.files_count,\n 'screenshots_count': self.screenshots_count,\n 'cnpj': self.cnpj}\n self.data.update({'result': self.result})\n if self.errors:\n self.data.update({'errors': self.unique_list(self.errors)})\n webhook_file_path = os.path.join(\n path, \"downloads\", self.scrape_id, '{renavam}-data_collected.json'.format(\n renavam=self.renavam))\n self.data_collected(self.data, webhook_file_path)\n # return item for scrapinghub\n self.result_received = True\n req = Request(self.start_url, callback=self.yield_item,\n errback=self.yield_item, dont_filter=True)\n self.crawler.engine.crawl(req, spider)", "def get_item(context, item):\n item_instance = context.get(\"reports\") if item == \"reports\" else context.get(f\"report/{context.uuid['report']}\")\n if item != \"reports\":\n item_instance = [\n report for report in item_instance[\"reports\"] if report[\"report_uuid\"] == context.uuid[\"report\"]\n ][0]\n if item == \"notification_destination\":\n return item_instance[\"notification_destinations\"][context.uuid[\"notification_destination\"]]\n if item != \"report\":\n item_instance = item_instance[\"subjects\"][context.uuid[\"subject\"]]\n if item != \"subject\":\n item_instance = item_instance[\"metrics\"][context.uuid[\"metric\"]]\n if item != \"metric\":\n item_instance = item_instance[\"sources\"][context.uuid[\"source\"]]\n return item_instance", "def parse(self, response):\n\n\t\t### close spider if exception\n\t\tif 'Bandwidth exceeded' in response.body:\n\t\t\traise CloseSpider('bandwidth_exceeded')\n\n\t\tlog_scrap.debug(u\"\\n>>> NEW PARSING >>>\\n\" 
)\n\t\tlog_scrap.info(\"--- GenericSpider.parse ...\" )\n\n\t\tlog_scrap.info(\"\\n--- GenericSpider.parse /response : \\n%s\" , response)\n\t\tlog_scrap.info(\"\\n--- GenericSpider.parse /response : \\n%s \\n\" , response.__dict__.keys() )\n\n\t\t# for k, v in response.__dict__.iteritems() :\n\t\t# \tlog_scrap.info(\"\\n--- [k] {} : [v] {} : \".format(k,v))\n\t\t# print response._body\n\t\tstart_url = response.meta[\"start_url\"]\n\t\tlog_scrap.info(\"--- GenericSpider.parse / start_url : %s\", start_url )\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start request with API crawler\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t# if self.spider_config_flat[\"parse_api\"] == True :\n\t\tif self.parse_api == True :\n\n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting request on API endpoint... \" )\n\t\t\tjsonresponse = json.loads(response.body_as_unicode())\n\t\t\t# log_scrap.info(\"--- GenericSpider.parse / jsonresponse : \\n%s\", jsonresponse )\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / jsonresponse received...\" )\n\n\t\t\traw_items_list = get_dictvalue_from_xpath(jsonresponse, self.item_xpath)\n\t\t\t# raw_items_list = jsonresponse[self.item_xpath]\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / raw_items_list[0] : \\n%s\\n...\", pformat(raw_items_list[0]) )\n\n\t\t\t### - - - - - - - - - - ###\n\t\t\t### PARSING PAGE - API\n\t\t\t### start parsing page : loop through data items in page in response\n\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. / START LOOPING raw_items_list WITH API ...\" )\n\n\t\t\t\t# while self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\tprint()\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - API - item n°{} >>> \\n\".format(self.item_count) )\n\n\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\titem[ 'spider_id' ]\t= self.spider_id\n\t\t\t\t\t\titem[ 'added_by' ]\t= self.user_id\n\t\t\t\t\t\titem[ 'added_at' ]\t= time.time()\t\t# timestamp\n\t\t\t\t\t\titem[ 'link_src' ]\t= response._url\n\n\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_api_rest=True, item_n=self.item_count)\n\n\n\t\t\t\t\t\t### - - - - - - - - - - ###\n\t\t\t\t\t\t### FOLLOW LINK - API\n\t\t\t\t\t\t### if need to follow to extract all data\n\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - API - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. 
/ self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\t# follow_link_raw = raw_data[ self.follow_xpath ]\n\t\t\t\t\t\t\tfollow_link_raw = get_dictvalue_from_xpath(raw_data, self.follow_xpath)\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW ({}) : {} \".format(type(follow_link_raw),follow_link_raw) )\n\n\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\turl_follow = self.page_url\n\n\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link_raw, url_root=url_follow)\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN : %s \", follow_link )\n\n\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\turl \t\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\tfollow_is_api = self.follow_is_api\n\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url, 'item_n' : self.item_count , 'parse_api' : follow_is_api })\n\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t### item completion is finished - yield and so spark pipeline for item (store in db for instance)\n\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t# log_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF LIMIT_ITEMS')\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t# raise CloseSpider('OUT OF ITEMS')\n\n\t\t\t### - - - - - - - - - - - - ###\n\t\t\t### NEXT PAGE - API\n\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse (API) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t### get and go to next page\n\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\turl_next = \"\"\n\t\t\t\t\tif self.api_pagination_root != \"\" :\n\t\t\t\t\t\turl_next = self.api_pagination_root\n\t\t\t\t\telse :\n\t\t\t\t\t\turl_next = self.page_url\n\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\tnext_page = url_next + str(self.page_count)\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE II : %s\", next_page )\n\n\t\t\t\t\tyield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} \".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} \".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start requests with pure Scrapy requests\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\telif self.spider_config_flat[\"parse_reactive\"] == False :\n\t\t# elif self.parse_reactive == False :\n \n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting requests with Scrapy... \" )\n\t\t\t# self.parse_scrapy(response)\n\n\t\t\t### find items list\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / self.item_xpath : %s\", self.item_xpath )\n\t\t\traw_items_list = response.xpath(self.item_xpath)\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / len(raw_items_list) : %d \", len(raw_items_list) )\n\n\n\t\t\t### - - - - - - - - - - - ###\n\t\t\t### PARSING PAGE - SCRAPY\n\t\t\t### start parsing page : loop through data items in page in response\n\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. 
/ START LOOPING raw_items_list WITH SCRAPY ...\" )\n\n\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\tprint()\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - Scrapy - item n°{} / page n°{} >>> \\n\".format(self.item_count, self.page_count) )\n\n\t\t\t\t\t\t# print \">>> raw_data : \\n\", raw_data.extract()\n\n\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\titem[ 'spider_id' ]\t\t= self.spider_id\n\t\t\t\t\t\titem[ 'added_by' ]\t\t= self.user_id\n\t\t\t\t\t\titem[ 'added_at' ]\t\t= time.time()\t\t# timestamp\n\t\t\t\t\t\titem[ 'link_src' ]\t\t= response._url\n\n\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, item_n=self.item_count)\n\n\n\t\t\t\t\t\t### - - - - - - - - - - - ###\n\t\t\t\t\t\t### FOLLOW LINK - SCRAPY\n\t\t\t\t\t\t### if need to follow to extract all data\n\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - SCRAPY - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\tfollow_link \t= raw_data.xpath( self.follow_xpath ).extract_first()\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW ({}) : {} \".format(type(follow_link),follow_link) )\n\n\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\n\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link, url_root=url_follow)\n\t\t\t\t\t\t\t# log_scrap.info(\" --> follow_link CLEAN : %s \", follow_link )\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN ({}) : {} \".format(type(follow_link),follow_link) )\n\n\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\turl \t\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOWING LINK --> url : {} \".format(url) )\n\t\t\t\t\t\t\t\t# yield Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url } )\n\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False} )\n\t\t\t\t\t\t\t\t# log_scrap.warning(u\">>> FOLLOWING LINK --> url : {} / WORKED !!! \".format(url) )\n\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOW LINK - NOT WORKING : {} \".format(url) )\n\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\telse :\n\n\t\t\t\t\t\t\tlog_scrap.warning(u\">>> NO FOLLOW LINK ... 
\" )\n\t\t\t\t\t\t\t### item completion is finished - yield and so spark pipeline for item (store in db for instance)\n\t\t\t\t\t\t\t# log_scrap.info(\">>> GenericSpider.parse - item.items() : \\n %s\", item.items() )\n\t\t\t\t\t\t\t# log_scrap.info(\">>> GenericSpider.parse - item.keys() : \\n %s\", item.items() )\n\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t# print (\"\\n>>> NEXT ITEM \" + \">>> >>> \"*10, \"\\n\")\n\n\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF LIMIT_ITEMS')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF ITEMS')\n\n\t\t\t### - - - - - - - - - - ###\n\t\t\t### NEXT PAGE - SCRAPY\n\t\t\t### check if there is a test_limit\n\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse (Scrapy) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t### get and go to next page\n\t\t\t\t\tis_next_page, next_page = self.get_next_page(response, start_url)\n\n\t\t\t\t\tif is_next_page :\n\n\t\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\t\turl_next = \"\"\n\t\t\t\t\t\tif self.api_pagination_root != \"\" :\n\t\t\t\t\t\t\turl_next = self.api_pagination_root\n\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE I : %s\", next_page )\n\t\t\t\t\t\tnext_page = self.clean_link(next_page, url_root=url_next)\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE II : %s\", next_page )\n\n\t\t\t\t\t\tyield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / NO MORE PAGE TO SCRAP - pages count : {} \".format(self.page_count) )\n\t\t\t\t\t\t# raise CloseSpider('NO MORE PAGE TO SCRAP')\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start requests with Selenium\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\telse :\n\t\t\t### initiate selenium browser\n\t\t\t### cf : https://github.com/voliveirajr/seleniumcrawler/blob/master/seleniumcrawler/spiders/seleniumcrawler_spider.py\n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting Selenium driver... \" )\n\n\t\t\t# retrieve exec path for chromedriver from settings_scrapy.py\n\t\t\t### GET APP MODE FROM ENV VARS\n\t\t\tapp_mode \t\t\t\t\t\t= os.environ.get('APP_MODE', 'default')\n\t\t\tlog_scrap.debug(u\"--- GenericSpider.parse / APP_MODE : %s\", app_mode)\n\t\t\tchromedriver_path \t= CHROMEDRIVER_PATH_LIST[ app_mode ]\n\t\t\tlog_scrap.debug(u\"--- GenericSpider.parse / chromedriver_path : %s\", chromedriver_path)\n\n\t\t\t### specify executable path to launch webdriver-->\n\t\t\t# cf : where chromedriver was installed when `brew install chromedriver`\n\t\t\tself.driver = webdriver.Chrome(executable_path=chromedriver_path, chrome_options=options_selenium)\n\t\t\t# self.driver = webdriver.Chrome(chrome_options=options_selenium)\n\t\t\t# self.driver = webdriver.Firefox()\n\t\t\t# self.driver = webdriver.Chrome()\n\t\t\t# self.driver = webdriver.PhantomJS() ### deprecated\n\n\t\t\t### setup waiting times\n\t\t\t# self.driver.set_page_load_timeout(60)\n\t\t\tself.wait_driver\t= WebDriverWait(self.driver, self.delay_driver)\n\t\t\tself.wait_page \t\t= WebDriverWait(self.driver, self.delay_new_page)\n\t\t\tself.driver.implicitly_wait(self.delay_implicit)\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_driver : %s\", self.delay_driver )\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_new_page : %s\", self.delay_new_page )\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_implicit : %s\", self.delay_implicit )\n\n\n\t\t\t### start parsing with selenium\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / response._url : %s\", response._url )\n\t\t\ttry :\n\t\t\t\tself.driver.get(response._url)\n\n\t\t\t\t### try scroll_down if needed in config\n\t\t\t\tif self.spider_config_flat['scroll_down'] : \n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / scroll_down is TRUE ... \" )\n\t\t\t\t\t# log_scrap.debug(u\"--- GenericsSpider. / scroll_down - self.spider_config_flat : \\n%s\", pformat(self.spider_config_flat) )\n\n\t\t\t\t\tscroll_pause_time = self.spider_config_flat[\"scroll_pause_time\"]\n\t\t\t\t\tmax_loops \t\t\t\t= self.spider_config_flat[\"scroll_loops\"]\n\t\t\t\t\tself.driver = scroll_down(self.driver, scroll_pause_time, max_loops)\n\t\t\t\t\t# scroll_down(self.driver, scroll_pause_time, max_loops)\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. / url '{}' is loaded ... 
\".format( response._url ))\n\t\t\t\n\t\t\texcept :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tself.driver.close()\n\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\traise CloseSpider('DRIVER NOT RESPONDING')\n\n\n\t\t\t### clean original xpath from strings\n\t\t\tstrings_to_clean = [\n\t\t\t\t'/@src',\n\t\t\t\t'/@href',\n\t\t\t\t'/text()',\n\t\t\t\t'/@*[name()=\"xlink:href\"]',\n\t\t\t\t'/@datetime'\n\t\t\t]\n\n\t\t\t# while self.there_is_more_items_to_scrap :\n\t\t\twhile self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / while loop continues : %s\", self.there_is_more_items_to_scrap )\n\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / while loop continues : %s\", self.there_is_more_items_to_scrap_dict[start_url] )\n\n\t\t\t\ttry :\n\n\t\t\t\t\t### wait / debug page content\n\t\t\t\t\tpage_source_code = self.driver.page_source.encode(\"utf-8\")\n\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / page_source_code : \\n %s \", page_source_code )\n\t\t\t\t\ttime.sleep(self.delay_new_page)\n\n\t\t\t\t\t### start parsing page :\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.item_xpath : %s\", self.item_xpath )\n\t\t\t\t\traw_items_list \t= self.driver.find_elements_by_xpath(self.item_xpath)\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / raw_items_list length : %s\", len(raw_items_list) )\n\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / raw_items_list[0].text : \\n%s\", raw_items_list[0].text )\n\n\t\t\t\t\t# current_item_index = 0\n\n\t\t\t\t\t### - - - - - - - - - - - - ###\n\t\t\t\t\t### PARSING PAGE - SELENIUM\n\t\t\t\t\t# loop through data items in page in response\n\t\t\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / START PARSING WITH SELENIUM ...\\n\" )\n\n\t\t\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\t\t\tprint()\n\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / START LOOPING raw_items_list WITH SELENIUM ...\" )\n\n\t\t\t\t\t\t\t### add +1 to items count\n\t\t\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} - there_is_more_items_to_scrap_dict[start_url] : {} \".format(str(self.spider_name), self.item_count, self.there_is_more_items_to_scrap_dict[start_url]) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} \".format(self.spider_name, self.item_count) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - item n°{} \".format(self.item_count) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. 
/ VARIABLES - spider_name : '%s' - item n°%s \" %(self.spider_name, self.item_count) )\n\n\t\t\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - Selenium - item n°{} / page n°{} >>> \\n\".format(self.item_count, self.page_count) )\n\n\t\t\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\t\t\titem[ 'spider_id' ]\t\t= self.spider_id\n\t\t\t\t\t\t\t\titem[ 'added_by' ]\t\t= self.user_id\n\t\t\t\t\t\t\t\titem[ 'added_at' ]\t\t= time.time()\t\t# timestamp\n\t\t\t\t\t\t\t\titem[ 'link_src' ]\t\t= response._url\n\n\t\t\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )\n\n\t\t\t\t\t\t\t\t### - - - - - - - - - - ###\n\t\t\t\t\t\t\t\t### FOLLOW LINK - SELENIUM\n\t\t\t\t\t\t\t\t### find follow link to open detailled item view\n\t\t\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - SELENIUM - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\t\t\t### follow link with Scrapy\n\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / follow link with Scrapy ...\" )\n\n\t\t\t\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. 
/ get href of follow_link ...\" )\n\t\t\t\t\t\t\t\t\t\tfollow_link_xpath \t= clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link_xpath : %s \", follow_link_xpath )\n\n\t\t\t\t\t\t\t\t\t\tfollow_link\t\t\t= raw_data.find_element_by_xpath( follow_link_xpath ).get_attribute('href')\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW : %s \", follow_link )\n\n\t\t\t\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\n\t\t\t\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link, url_root=url_follow)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN ({}) : {}\".format(type(follow_link), follow_link ) )\n\n\t\t\t\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\t\t\t\turl\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOWING LINK --> url : {} \".format(url) )\n\t\t\t\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False})\n\n\t\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOW LINK - NOT WORKING : {} \".format(url) )\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\n\t\t\t\t\t\t\t\t\t### follow link with Selenium\n\t\t\t\t\t\t\t\t\t### FIND A WEBSITE TEST FOR REACTIVE DETAILLED PAGES\n\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / follow link with Selenium ...\" )\n\n\t\t\t\t\t\t\t\t\t\tfollow_link_xpath \t= clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_link_xpath : %s\", self.follow_link_xpath )\n\t\t\t\t\t\t\t\t\t\tfollow_link \t\t= raw_data.find_element_by_xpath( follow_link_xpath )\n\n\t\t\t\t\t\t\t\t\t\t### open link in new tab ?\n\t\t\t\t\t\t\t\t\t\tfollow_link.click()\n\n\t\t\t\t\t\t\t\t\t\t### get data and save data\n\t\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / get data and save data ...\" )\n\t\t\t\t\t\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )\n\n\t\t\t\t\t\t\t\t\t\t\t### back to previous page and scrap from where it left\n\t\t\t\t\t\t\t\t\t\t\t### cf : https://selenium-python.readthedocs.io/navigating.html#navigation-history-and-location\n\t\t\t\t\t\t\t\t\t\t\tself.driver.back()\n\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t\t\traise CloseSpider('OUT OF LIMIT_ITEMS')\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF ITEMS - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\t### - - - - - - - - - - - - ###\n\t\t\t\t\t### NEXT PAGE - SELENIUM\n\t\t\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\t\t\tif self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\t\t\t\tprint ()\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\t\t\tlog_scrap.info(\" --- GenericSpider.parse (Selenium) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t\t\t\t### add +1 to parsed pages\n\t\t\t\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\n\t\t\t\t\t\t\t\t### find next page btn in current view\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.next_page : %s\", self.next_page )\n\t\t\t\t\t\t\t\tnext_page_xpath = clean_xpath_for_reactive(self.next_page, strings_to_clean)\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page_xpath : %s\", next_page_xpath )\n\t\t\t\t\t\t\t\t# next_page \t= re.sub(\"|\".join(strings_to_clean), \"\", next_page )\n\n\t\t\t\t\t\t\t\t# try :\n\t\t\t\t\t\t\t\t# element_present = EC.presence_of_element_located((By.XPATH, next_page_xpath ))\n\t\t\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / next_page present : %s\", element_present )\n\t\t\t\t\t\t\t\t# self.wait.until(element_present)\n\t\t\t\t\t\t\t\t# next_page = self.wait.until( EC.element_to_be_clickable(element_present) )\n\t\t\t\t\t\t\t\t# next_page \t\t= self.driver.find_element_by_xpath( next_page_xpath )\n\t\t\t\t\t\t\t\tnext_page \t\t= self.driver.find_element(By.XPATH, next_page_xpath )\n\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page : %s\", next_page )\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page.text : %s\", next_page.text )\n\n\t\t\t\t\t\t\t\t# except TimeoutException:\n\t\t\t\t\t\t\t\t# except :\n\t\t\t\t\t\t\t\t# \tlog_scrap.error(\"--- GenericSpider. 
/ Timed out waiting for page to load\")\n\n\t\t\t\t\t\t\t\t### click next button and wait for ajax calls to complete (post and get)\n\t\t\t\t\t\t\t\t### cf : http://www.obeythetestinggoat.com/how-to-get-selenium-to-wait-for-page-load-after-a-click.html\n\n\t\t\t\t\t\t\t\t# def wait_for(condition_function):\n\t\t\t\t\t\t\t\t# \t\tstart_time = time.time()\n\t\t\t\t\t\t\t\t# \twhile time.time() < start_time + 3:\n\t\t\t\t\t\t\t\t# \t\tif condition_function():\n\t\t\t\t\t\t\t\t# \t\t\treturn True\n\t\t\t\t\t\t\t\t# \t\telse:\n\t\t\t\t\t\t\t\t# \t\t\ttime.sleep(0.1)\n\t\t\t\t\t\t\t\t# \traise Exception ('Timeout waiting for {}'.format(condition_function.__name__) )\n\n\t\t\t\t\t\t\t\t# def link_has_gone_stale():\n\t\t\t\t\t\t\t\t# \t\ttry:\n\t\t\t\t\t\t\t\t# \t\t# poll the link with an arbitrary call\n\t\t\t\t\t\t\t\t# \t\tnext_page.find_elements_by_xpath(self.item_xpath)\n\t\t\t\t\t\t\t\t# \t\treturn False\n\t\t\t\t\t\t\t\t# \texcept StaleElementReferenceException :\n\t\t\t\t\t\t\t\t# \t\treturn True\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- ... ---\")\n\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page.click() \" )\n\t\t\t\t\t\t\t\t\tnext_page.click()\n\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / next_page.send_keys( \\ n )\" )\n\t\t\t\t\t\t\t\t\t# next_page.send_keys(\"\\n\")\n\t\t\t\t\t\t\t\t\t# added this step for compatibility of scrolling to the view\n\t\t\t\t\t\t\t\t\tlog_scrap.error(\"--- GenericSpider. / ALTERNATIVE next_page.click() \" )\n\t\t\t\t\t\t\t\t\t# self.driver.execute_script(\"return arguments[0].scrollIntoView();\", next_page)\n\t\t\t\t\t\t\t\t\t# next_page.click()\n\t\t\t\t\t\t\t\t\tself.driver.execute_script(\"arguments[0].click();\", next_page)\n\n\t\t\t\t\t\t\t\t### wait after click\n\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / wait for ajax to finish... \" )\n\t\t\t\t\t\t\t\t\t# wait_for(link_has_gone_stale)\n\t\t\t\t\t\t\t\t\tself.wait_page.until(lambda driver: self.driver.execute_script('return jQuery.active') == 0)\n\t\t\t\t\t\t\t\t\tself.wait_page.until(lambda driver: self.driver.execute_script('return document.readyState') == 'complete')\n\t\t\t\t\t\t\t\t\t# time.sleep(self.delay_implicit)\n\t\t\t\t\t\t\t\t\ttime.sleep(self.delay_new_page)\n\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\tlog_scrap.error(\"--- GenericSpider. / !!! FAIL / wait for ajax to finish... \" )\n\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF PAGES TO SCRAP - page n°{} / except -> break\".format(self.page_count) )\n\t\t\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\t\t\traise CloseSpider('OUT OF PAGES TO SCRAP')\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\t\t\t\t\t\tbreak\n\n\t\t\t\texcept :\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ NO MORE ITEMS TO SCRAP - item_count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\tself.driver.close()\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\traise CloseSpider('NO MORE ITEMS TO SCRAP')\n\t\t\t\t\tbreak", "def parse_item_page_price(self, id, body):\n info = {}\n end = self.__re_search_item_pos(body, *self.regx['combo'])\n if end:\n body = body[:end[1]]\n info['original'] = self.__re_search(body, *self.regx['original'])\n info['save'] = self.__re_search(body, *self.regx['save'])\n info['price'] = self.__re_search(body, *self.regx['price'])\n info['rebate'] = self.__re_search(body, *self.regx['rebate'])\n return info", "async def wrap_up_processing_reports(self):\n if hasattr(Config(), 'results'):\n new_row = []\n for item in self.recorded_items:\n item_value = {\n 'global_round':\n self.current_global_round,\n 'round':\n self.current_round,\n 'accuracy':\n self.accuracy * 100,\n 'average_accuracy':\n self.average_accuracy * 100,\n 'edge_agg_num':\n Config().algorithm.local_rounds,\n 'local_epoch_num':\n Config().trainer.epochs,\n 'training_time':\n max([\n report.training_time for (report, __) in self.updates\n ]),\n 'round_time':\n time.perf_counter() - self.round_start_time\n }[item]\n new_row.append(item_value)\n\n if Config().is_edge_server():\n result_csv_file = f'{Config().result_dir}result_{Config().args.id}.csv'\n else:\n result_csv_file = f'{Config().result_dir}result.csv'\n\n csv_processor.write_csv(result_csv_file, new_row)\n\n if Config().is_edge_server():\n # When a certain number of aggregations are completed, an edge client\n # needs to be signaled to send a report to the central server\n if self.current_round == Config().algorithm.local_rounds:\n logging.info(\n '[Server #%d] Completed %s rounds of local aggregation.',\n os.getpid(),\n Config().algorithm.local_rounds)\n self.model_aggregated.set()\n\n self.current_round = 0\n self.new_global_round_begins.clear()\n # Wait until a new global round begins\n # to avoid selecting clients before a new global round begins\n await self.new_global_round_begins.wait()", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data = {}\n data[\"qid\"] = (\"i-\" + str(result.parliamentary_item_id))\n if type(result)==domain.AgendaItem:\n g = u\" \" + result.group.type + u\" \" + result.group.short_name\n else:\n g = u\"\" # !+ g?\n data[\"subject\"] = result.short_name\n data[\"title\"] = result.short_name\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"%ss/obj-%i\" % (\n result.type, result.parliamentary_item_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n if type(result)==domain.Question:\n data[\"to\"] = result.ministry.short_name\n else:\n data[\"to\"]= u\"\"\n # remember original domain object\n data[\"id\"] = result.parliamentary_item_id\n data[\"_obj\"] = result\n # append processed result item\n data_list.append(data)\n self._data = data_list", "def parse_search_result(self, response):\n item = ElectronicItem()\n\n part_number = cleansplit(Selector(text=response.body)\n #.css(\"li.ttipartnumber a ::text/li[@class='ttipartnumber']/a/text()\"))\n #.css(\"li.ttipartnumber a::text\"))\n 
.xpath(\"//meta[@itemprop='sku']/@content\"))\n manufacturer_part_number = cleansplit(Selector(text=response.body)\n .xpath(\"//meta[@itemprop='mpn']/@content\"))\n #.css(\"li.mfrpartnumber a::text\"))\n manufacturer_name = cleansplit(Selector(text=response.body)\n .xpath(\"//td[@class='oc_row']/div/img/@title\"))\n #.css(\"li.manufacturer::text\"))\n description = cleansplit(Selector(text=response.body)\n .xpath(\"//span[@itemprop='description']/text()\"))\n #.css(\"td.description::text\"))\n quantity_available = cleansplit(Selector(text=response.body)\n .xpath(\"//table[1]/tbody[1]/tr/td[5]//text()\"))\n #.css(\"td.availability::text\"))\n image_url = cleansplit(Selector(text=response.body)\n .xpath(\"//table[1]/tbody[1]/tr/td[2]/img[1]/@src\"))\n\n '''\n This is variable handler when no content in selected xpath. so this algorithm will keep list balanced.\n and alyways will process zip iteration. and return scaped item. see customfunction.py for listbalancer method'''\n if not quantity_available: quantity_available = listbalancer(part_number)\n if not image_url: image_url = listbalancer(image_url)\n if not description: description = listbalancer(description)\n\n for i, j, k, l, m, n in zip(part_number, manufacturer_part_number, manufacturer_name,\n description, quantity_available, image_url):\n item['manufacturer'] = k\n item['manufacturer_part_number'] = j\n item['supplier'] = self.spider_name\n item['supplier_part_number'] = i\n item['description'] = l\n item['image_url'] = n\n item['product_url'] = response.url\n item['stock_qty'] = cleanqty(m.replace(u'\\xa0', u''))\n yield item\n #next_url = response.xpath(\n # '//a[@id=\"ctl00_PlaceHolderMain_results_pagingFooter_ctl08_HyperLink6\"]//@href').extract_first()\n next_url = response.xpath(\"//nav[1]/ul[1]/li[4]/a[1]/@href\").extract_first()\n if self.debug: print \"Next URL -> %s\" % (next_url)\n if next_url:\n \"Following Next Page {0}\".format(response.urljoin(next_url))\n yield Request(response.urljoin(next_url), callback=self.parse_search_result, dont_filter=True)\n # items.append(dict(item))\n # return items", "def process_item_data(self, db, ref, response):\n raise Exception(\"To be implemented\")", "def scrap(self, **kwargs):\n field = kwargs['field']\n data = kwargs['data']\n\n if data:\n if 'xpath' in data:\n if data['xpath']:\n for xpath in data['xpath']:\n output = self.use_x_path(xpath)\n if output['value']:\n return output\n\n if 'regex' in data:\n if data['regex']:\n for regex in data['regex']:\n output = self.use_regex(regex)\n if output['value']:\n return output\n\n return {'value': self.RESPONSE_DATA[field], 'method': None}", "def parse_item(self, response):\n self.check_Tor_time()\n print(\"Looking\", response.url)\n # Create the loader using the response\n l = ItemLoader(item=PropertiesItem(), response=response)\n l.default_output_processor = TakeFirst()\n try:\n self.fill_from_Json(l)\n except Exception as e:\n print('exception->', e)\n print('1')\n for node in response.css('div.padding-phone-only > .padding-small-top'):\n try:\n title = node.xpath('div[1]/h6/text()').extract()\n except Exception as e:\n print 1, e\n print('title:', title)\n try:\n val = node.xpath('div[2]/text()').extract()\n except Exception as e:\n print 2, e\n try:\n if \"code\" in title[0]:\n l.add_value('unique_id', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Bedrooms\" in title[0]:\n l.add_value('property_rooms_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Construction\" in title[0]:\n 
l.add_value('construction_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Modified\" in title[0]:\n l.add_value('on_site_date', node.xpath('div[2]/time/text()').extract()[0],\n MapCompose(\n lambda i: parse(i, fuzzy=True)))\n print(node.xpath('div[2]/time/text()').extract())\n except Exception as e:\n print 3, e\n print('2')\n # Housekeeping fields\n l.add_value('url', response.url)\n # l.add_value('spider', self.name)\n l.add_value('source', self.allowed_domains[0])\n l.add_value('imported_date', datetime.now())\n l.add_value('asset_type', 'realestate')\n l.add_value('transaction_type', 'commercial')\n tp = response.xpath(\n '//*[@id=\\\"breadCrumbs\\\"]/a[1]/text()').extract()[0]\n print('3')\n if \"Sales\" in tp:\n l.replace_value('property_buy_or_rent', \"sale\")\n else:\n l.replace_value('property_buy_or_rent', \"rent\")\n if \"residential\" in tp:\n l.add_value('category_major', \"residential\")\n elif \"commercial\" in tp:\n l.add_value('category_major', \"commercial\")\n else:\n l.add_value('category_major', \"land\")\n # a = l.load_item()\n # print(a)\n # return\n print('4')\n\n print(l)\n return l.load_item()", "def process(self):\n (headers, data) = _Event.process(self)\n \n generic_transforms.to_int(headers, ('ListItems',), -1)\n \n return (headers, data)", "def process(self):\n (headers, data) = _Event.process(self)\n \n generic_transforms.to_int(headers, ('ListItems',), -1)\n \n return (headers, data)", "def process_metadata_items(self):\n for item_id, item in self.metadata.items():\n assert item_id not in self.processed_metadata, 'Item {} presents twice'.format(item_id)\n self.processed_metadata[item_id] = {}\n for field, field_vals in item['metadata'].items():\n curr_field = ''\n # availability field is always empty\n if field == 'availability' or field == 'url':\n continue\n values = field_vals\n if field == 'availableSizes' and not isinstance(values, list,):\n values = self.repair_size_list(values)\n\n #field_tokens = tokenizer.tokenize(field)\n field_tokens = re.split('_|\\s', field)\n for tok in field_tokens:\n cleaned_tok = self._ATTR2STR[tok.lower()] if tok.lower() in self._ATTR2STR else tok.lower()\n curr_field += cleaned_tok + ' '\n curr_field = curr_field[:-1]\n \n curr_val = ''\n proc_values = []\n if isinstance(values, list,):\n for val in values:\n curr_val = ''\n #value_tokens = tokenizer.tokenize(val)\n value_tokens = re.split('_|\\s', val)\n proc_values.append(' '.join(value_tokens))\n else:\n value_tokens = re.split('_|\\s', values)\n proc_values.append(' '.join(value_tokens))\n\n #metadata JSON files contains different samples having hemLenght field twice.\n # In this case just discard the one with no values.\n if curr_field == 'hem length' and curr_field in self.processed_metadata[item_id]:\n if not len(self.processed_metadata[item_id][curr_field]):\n self.processed_metadata[item_id][curr_field] = proc_values\n continue\n assert curr_field not in self.processed_metadata[item_id], 'Field {} presents twice in item {}. Please remove one of them (preferably the empty one)'.format(curr_field, item_id)\n self.processed_metadata[item_id][curr_field] = proc_values", "def _scrape(self):", "def getItems(self): \n if self.itemCount > 0:\n \n site = getSite()\n \n \n # Make string path relative to the site root\n # E.g. 
string path \"news\" becomes \"/yoursiteid/news\"\n site_path = site.getPhysicalPath();\n \n path = \"/\".join(site_path) + \"/\" + self.path \n \n types = [self.itemPortalType]\n \n items = []\n \n #if self.itemPortalType2 != None:\n # types.append(self.itemPortalType2) \n \n #print \"Querying by:\" + type + \" \" + path\n content_by_type = self.context.portal_catalog(path={ \"query\": path, \"depth\" :9 }, \n portal_type=self.itemPortalType, \n sort_on=\"created\", \n sort_order=\"reverse\")[0:self.itemCount]\n\n content_by_type = list(content_by_type)\n \n if self.itemPortalType2 != None:\n content_by_type2 = self.context.portal_catalog(path={ \"query\": path, \"depth\" :9 }, \n portal_type=self.itemPortalType2, \n sort_on=\"created\", \n sort_order=\"reverse\")[0:self.itemCount]\n\n content_by_type += list(content_by_type2)\n\n \n items += [ brain.getObject() for brain in content_by_type ]\n else:\n items = []\n \n #if self.title == \"Daily deals\":\n # import pdb ; pdb.set_trace()\n \n # XXX: custom hack for deals\n def is_expired_deal(i):\n \"\"\"\n \"\"\"\n if hasattr(i, \"validUntil\"):\n now = datetime.datetime.utcnow()\n if now > i.validUntil:\n return True\n \n return False\n \n items = [ i for i in items if not is_expired_deal(i) ]\n \n return items", "def main(self):\n\n\t\tparsed_href = self.__load_webpage()\n\n\t\tsmart_attr_to_drive_list_map = self.__get_smart_attr_headers_params(parsed_href)\n\t\tdrives_by_manufacturer = self.__get_drives_grouped_by_mfg(smart_attr_to_drive_list_map)\n\t\tdrive_attributes, mfg_reports_same_attrs = self.__manufacturer_reported_params(smart_attr_to_drive_list_map)\n\t\t\n\t\treturn smart_attr_to_drive_list_map, drives_by_manufacturer, drive_attributes, mfg_reports_same_attrs", "def scraper_data(self):\n self.lock.acquire()\n for item in s.item:\n item_name = item.get(\"item\")\n item_url = item.get(\"url\")\n item_stock, item_cost = self.scraper.ChooseScraper(item_url)\n s.updateStatus(item_name, item_url, item_stock, item_cost)\n time.sleep(1)\n\n self.lock.release()", "def _process(self):\n self.kwargs[\"collect\"].process_scan_form_data(self.kwargs[\"data\"])", "def _process(self):\n self.output[\"data\"] = get_form_data(self.kwargs[\"collect\"].ona_scan_form_pk)", "def parse(self, response):\n content = response.body\n if not content:\n return\n sel = Selector(response)\n #print sel.xpath('//table[@class=\"board-list tiz\"]/tr').extract()\n for job in sel.xpath('//ul[@class=\"sojob-list\"]/li'):\n #print 'd',job\n info = job.xpath('div[@class=\"sojob-item-main clearfix\"]/div[@class=\"job-info\"]')\n com_info = job.xpath('div[@class=\"sojob-item-main clearfix\"]/div[@class=\"company-info nohover\"]')\n title = info.xpath('h3/a/text()').extract_first().lower()\n if title.find('python') != -1:\n url = info.xpath('h3/a/@href').extract_first()\n request = scrapy.Request(url=url,\n callback=self.parse_items,\n headers=self.spider.headers,\n cookies=self.cookies)\n company_item, job_item = CompanyItem(), JobItem()\n company_item['name'] = com_info.xpath('p[@class=\"company-name\"]/a/text()').extract_first()\n company_item['homepage'] = com_info.xpath('p[@class=\"company-name\"]/a/@href').extract_first()\n job_item['pub_time'] = info.xpath('p[@class=\"time-info clearfix\"]/time/text()').extract_first()\n year = str(date.today().year)\n if str(year) not in job_item['pub_time']:\n if job_item['pub_time'] == u'昨天':\n job_item['pub_time'] = (date.today()-timedelta(days=1)).strftime(\"%Y-%m-%d\")\n elif job_item['pub_time'] == u'前天':\n 
job_item['pub_time'] = (date.today() - timedelta(days=2)).strftime(\"%Y-%m-%d\")\n else:\n job_item['pub_time'] = date.today().strftime(\"%Y-%m-%d\")\n job_item['title'] = title\n job_item['welfare'] = ' '.join(com_info.xpath('p[@class=\"temptation clearfix\"]/span/text()').extract())\n job_item['salary'] = info.xpath('p[@class=\"condition clearfix\"]/span[@class=\"text-warning\"]/text()').extract_first()\n request.meta['company_item'] = company_item\n request.meta['job_item'] = job_item\n yield request", "def _process_config_item(item, dirname):\n item = copy.deepcopy(item)\n html = item.get(\"html\", None)\n\n if not html:\n raise UserWarning(\"Can't find HTML source for item:\\n%s\" % str(item))\n\n # process HTML link\n link = html if \"://\" in html else os.path.join(dirname, html)\n del item[\"html\"]\n\n # replace $name with the actual name of the field\n for key, val in item.items():\n if \"notfoundmsg\" in val:\n val[\"notfoundmsg\"] = val[\"notfoundmsg\"].replace(\"$name\", key)\n\n return {\n \"html\": _get_source(link),\n \"link\": link,\n \"vars\": item\n }", "def report_data(self):\n return {}", "def _download(self, request_dict={}):\n self.items = []\n html_tree = super()._download(request_dict=request_dict)\n for item in html_tree.xpath(\"//item\"):\n case_name = item.xpath(\"./title/text()\")[0].split(\":\", 1)[1]\n if case_name.strip():\n self.items.append(item)\n\n # Set self.html to None so it can't be misused.\n return None", "def scrapeInfoForItem(self, subpage, item):\n\t\thtmlcontent = self.HttpHandler.getHtmlContentFromLink(item.link)\n\t\tsoupPage = BeautifulSoup(htmlcontent, \"html.parser\")\n\n\t\t# brand\n\t\tresult = soupPage.findAll(\"p\", { \"class\" : \"product-brand--details\" })\n\t\tif len(result) > 0:\n\t\t\tres1 = result[0].find(\"a\")\n\t\t\tif res1 == None:\n\t\t\t\titem.Brandname = str(result[0].contents[0])\n\t\t\telif len(res1) > 0:\n\t\t\t\titem.Brandname = str(res1.contents[0])\n\n\t\t# Name\n\t\tresult = soupPage.findAll(\"h1\", { \"class\" : \"product-title--details\" })\n\t\tif len(result) > 0:\n\t\t\tres1 = result[0].find(\"span\", { \"itemprop\" : \"name\" })\n\t\t\tif len(res1) > 0:\n\t\t\t\titem.Productname = str(res1.contents[0])\n\n\t\t# Color\n\t\tresults = soupPage.findAll(\"a\", { \"class\" : \"js-switch-colourVariant\" })\n\t\tif len(results) == 0:\n\t\t\tresult2 = soupPage.findAll(\"h1\", { \"class\" : \"product-title--details\" })\n\t\t\tif len(result) > 0:\n\t\t\t\tres2 = result2[0].find(\"span\", { \"itemprop\" : \"color\" })\n\t\t\t\tif len(res2) > 0:\n\t\t\t\t\titem.Colors = str(res2.contents[0])\n\t\telse:\n\t\t\titem.Colors = \"|\".join([res[\"title\"] for res in results])\n\n\t\t# size\n\t\tresults = soupPage.findAll(\"span\", { \"class\" : \"product-sizeLabel\" })\n\t\titem.Sizes = \"|\".join([res.contents[0] for res in results])\n\n\t\t# beschreibung\n\t\tresult = soupPage.find(\"ul\", { \"class\" : \"product-infoList--twoCol\" })\n\t\tif result:\n\t\t\tresults = result.findAll(\"span\")\n\t\t\titem.Description = \"|\".join([res.contents[0] for res in results])\n\n\t\t# material \n\t\tresults = soupPage.find(\"ul\", { \"class\" : \"product-infoList\" })\n\t\tif results:\n\t\t\tresults = results.findAll(\"span\")\n\t\t\titem.Materials = \"|\".join([res.contents[0] for res in results])\n\n\t\t# pflege\n\t\tresults = soupPage.find(\"ul\", { \"class\" : \"product-care\" })\n\t\tif results:\n\t\t\tresults = results.findAll(\"li\")\n\t\t\titem.Maintenance = \"|\".join([res.get_text() for res in results])\n\n\t\t# current, 
regular price (current can be reduced)\n\t\tresult = soupPage.find(\"meta\", { \"itemprop\" : \"price\" })\n\t\tif result:\n\t\t\tresult = result[\"content\"]\n\t\t\tif \",\" in result:\n\t\t\t\tresult = str(result).replace(',','.')\n\t\t\tif u'\\xa0' in result:\n\t\t\t\tresult = result.replace(u'\\xa0', u' ')[:-1] # there is a € sign at the end\n\t\t\tif \"ab\" in result:\n\t\t\t\titem.CurrentPrice = result\n\t\t\telse:\n\t\t\t\titem.CurrentPrice = float(result)\n\t\tresult = soupPage.find(\"span\", { \"class\" : \"is-regular\" })\n\t\tif result:\n\t\t\tif \",\" in result.contents[0]:\n\t\t\t\tresult = str(result.contents[0]).replace(',','.')\n\t\t\tif u'\\xa0' in result:\n\t\t\t\tresult = result.replace(u'\\xa0', u' ')[:-1] # there is a € sign at the end\n\t\t\tif \"ab\" in result:\n\t\t\t\titem.RegularPrice = result\n\t\t\telse:\n\t\t\t\titem.RegularPrice = float(result)\n\t\telse:\n\t\t\titem.RegularPrice = item.CurrentPrice", "def parse_abstract(self, response):\n\n if self.is_visited(response.url) == True:\n return None\n \n\n hxs = HtmlXPathSelector(response)\n item = ReportAbstractItem()\n\n url = response.url\n title = hxs.select(\"//td[@class='f20blue tdc']/text()\").extract()[0]\n date = hxs.select(\"//div[@class='f_black f_14']/text()\").extract()[0]\n abstract = hxs.select(\"//table[@class='f_black f_14']//td\").extract()[0]\n link = hxs.select(\"//a[contains(@href,'ShowNotesDocumentFile')]/@href\").extract()[0]\n link = \"http://www.gtja.com\" + link\n \n item[\"url\"] = unquote(response.url)\n item[\"title\"] = title\n item[\"date\"] = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n item[\"abstract\"] = abstract\n item[\"link\"] = link\n item[\"create_date\"] = datetime.datetime.now()\n \n time_delta = datetime.datetime.now() - item[\"date\"]\n if settings[\"EXPIRE_DAYS\"] and time_delta.days >= settings[\"EXPIRE_DAYS\"]:\n self.expired = True\n \n if self.expired == True:\n return\n \n self.visit(response.url)\n\n return item", "def process_item(self, item):\n self.logger.debug(\"Calculating site descriptors for {}\".format(\n item[self.materials.key]))\n\n struct = Structure.from_dict(item['structure'])\n\n site_descr_doc = {'structure': struct.copy()}\n site_descr_doc['site_descriptors'] = \\\n self.get_site_descriptors_from_struct(\n site_descr_doc['structure'])\n site_descr_doc['statistics'] = \\\n self.get_statistics(\n site_descr_doc['site_descriptors'])\n site_descr_doc[self.site_descriptors.key] = item[self.materials.key]\n\n return site_descr_doc", "def report_preparation(data):\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n fd = open(f\"{report_file_path}/mail_report.html\", \"w\")\n fd.write(\n \"\"\"\n <html>\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html charset=UTF-8\" />\n <style>\n table {\n font-family: arial, sans-serif;\n border-collapse: collapse;\n width: 100%;\n }\n\n th {\n border: 1px solid #000000;\n text-align: center;\n padding: 8px;\n }\n td {\n border: 1px solid #000000;\n text-align: center;\n padding: 8px;\n }\n </style>\n </head>\n\n <body>\n <p><font color=\"black\"> Hi All </font></p>\n \"\"\"\n )\n fd.write(\n \"\"\"\n <p><font color=\"black\">{}\n </font></p>\n <table>\n <thead>\n <tr>\n <th> Job Category </th>\n <th> Highlighted information/Test Failure</th>\n <th> Job URL </th>\n <th> Bugzilla </th>\n <th> Job Status </th>\n </tr></thead> \"\"\".format(\n data[\"body\"]\n )\n )\n data.pop(\"body\")\n report_file_path = (\n 
f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n\n if os.path.isfile(f\"{report_file_path}/subject\"):\n os.remove(f\"{report_file_path}/subject\")\n if os.path.isfile(f\"{report_file_path}/recipient\"):\n os.remove(f\"{report_file_path}/recipient\")\n with open(f\"{report_file_path}/subject\", \"wb\") as handler:\n pickle.dump(data[\"subject\"], handler)\n data.pop(\"subject\")\n\n with open(f\"{report_file_path}/recipient\", \"wb\") as handler:\n pickle.dump(data[\"recipient\"], handler)\n data.pop(\"recipient\")\n for _ in data:\n fd.write(\"<tr><td>{}</td>\".format(_, data[_]))\n fd.write(\"<td>\")\n for content in data[_][\"highlighted_information\"]:\n if (content.lstrip()).rstrip():\n if re.search(r\"tests.\", f\"{content}\"):\n fd.write(\n f'<font color=red><li align=\"left\">{(content.lstrip()).rstrip()}</li></font>'\n )\n else:\n fd.write(f'<li align=\"left\">{(content.lstrip()).rstrip()}</li>')\n fd.write(\"</td>\")\n fd.write(f\"<td><a href={data[_]['Build Url']}>Job Link</a></td>\")\n fd.write(\"<td>\")\n for bz in data[_][\"bugzilla\"].split(\".\"):\n if bz.lstrip().rstrip():\n fd.write(\n f\" <a href=https://bugzilla.xyz.com/show_bug.cgi?id={bz}>{bz}</a> \"\n )\n else:\n fd.write(f\"{bz}\")\n fd.write(\"</td>\")\n if data[_][\"Build_Status\"] == \"SUCCESS\":\n color = \"green\"\n fd.write(f\"<td><font color={color}>PASSED</font></td>\")\n else:\n color = \"red\"\n fd.write(f\"<td><font color={color}>FAILED</font></td>\")\n fd.write(\n \"\"\"\n </table>\n </body>\n <p><font color=\"black\">Note: For more details</font>\n <form action=\"https://wikipage></form></p>\n <p><font color=\"black\">Thanks</font><br>\n <font color=\"black\">xyz</font><p>\n </html>\"\"\"\n )\n fd.close()\n Common.logger.info(\"Report prepared for the selected job and their type\")", "def parse(self, response):\n for sel in response.xpath('//*[@id=\"J_goodsList\"]/ul/li[@class=\"gl-item\"]'):\n \"\"\"iterate all items in this page\"\"\"\n sku = sel.xpath('.//@data-sku').extract_first()\n price = float(sel.xpath('.//div/div[3]/strong/i/text()').extract_first())\n name = ''.join(sel.xpath('.//div/div[4]/a/em/descendant-or-self::node()/text()').extract())\n seller = sel.xpath('.//div/div[7]/span/a/text()').extract_first()\n sku_url = \"http:\" + sel.xpath('.//div/div[1]/a/@href').extract_first()\n\n yield Request(sku_url,\n callback=self.parse_item,\n meta = {'sku' : sku,\n 'price' : price,\n 'name' : name,\n 'seller' : seller})\n #make the request of individual page", "def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n 
LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()", "def get_dict(self):\n return {\n \"type\": self.item_type,\n \"size\": self.size,\n \"toppings\": self.toppings,\n \"price\": self.get_price()\n }", "def __getitem__(self, item):\n if isinstance(item, TestGroupReport):\n try:\n return self.data[(type(item), item.category)]\n except KeyError:\n pass\n return super(ReportRendererRegistry, self).__getitem__(item)", "def _setup_report_data(self):\n # current_app.logger.debug('Setup report data template starting.')\n template = self._get_template()\n current_app.logger.debug('Setup report data template completed, setup data starting.')\n data = {\n 'reportName': self._get_report_filename(),\n 'template': template,\n 'templateVars': self._get_template_data()\n }\n current_app.logger.debug('Setup report data completed.')\n return data", "def get_final_result(self, spider):\n\n # stop crawling after yeild_item called\n if not self.result_received:\n # push to webhook\n self.data = {\n 'scrape_id': self.scrape_id,\n 'scraper_name': self.name,\n 'files_count': self.files_count,\n 'screenshots_count': self.screenshots_count,\n 'cnpj': self.cnpj}\n self.data.update({'result': self.result})\n if self.errors:\n self.data.update({'errors': self.unique_list(self.errors)})\n webhook_file_path = os.path.join(\n path, \"downloads\", self.scrape_id,\n '{cnpj}-data_collected.json'.format(cnpj=self.cpf_cnpj))\n self.data_collected(self.data, webhook_file_path)\n # return item for scrapinghub\n self.result_received = True\n req = Request(self.start_url,\n callback=self.yield_item,\n errback=self.yield_item, dont_filter=True)\n self.crawler.engine.crawl(req, spider)", "def get_item_info(self, item, start_time=date.today(), end_time=date.today() + timedelta(days=+1), arg=None):\n ret_info = {}\n if arg is not None:\n index = self._items[item].index(arg)\n assert index != -1\n ret_info[arg] = self.retrive_url(self._linkurl_dict[arg], convert_date_time(start_time), convert_date_time(end_time))\n else:\n\n for req_index in self._items[item]:\n ret_info[req_index] = self.retrive_url(self._linkurl_dict[req_index], convert_date_time(start_time), convert_date_time(end_time))\n return ret_info", "def extract(self, response):\n\n #grab the BusinessItem passed in from the caller\n i = None\n try:\n i = response.meta['item']\n except Exception:\n i = BusinessItem()\n\n log.msg('passed in item={0}'.format(i), log.DEBUG)\n\n l = BusinessLoader(item=i, response=response)\n\n #Assume url pattern is /<addressLocality>/<category>/<duid>/<name>.html\n data_uid = re.match(pattern=u'.*COMPANYID=(\\d+)$', string=response.url).group(1).lstrip('0')\n\n l.add_xpath('description', '//*[@id=\"ctl00_ctl00_body_maincontentblock_lblProductandServices\"]/ text()')\n\n #List of strings which, when joined, form the address. 
form is <streetAddress>, <optional: streetAddress>, <addressLocality and state and postalCode>\n address_fields = response.xpath('//*[@id=\"ctl00_ctl00_body_maincontentblock_lblcoAddress\"]/ text()').extract()\n m = re.match(pattern=u'^([\\w\\s]*),\\s+([\\w\\s]+)[\\xa0]+(\\S+)$', string=address_fields[-1])\n\n l.add_value('streetAddress', address_fields[0])\n\n if len(address_fields) is 3:\n l.add_value('streetAddress', address_fields[1])\n\n l.add_value('addressLocality', m.group(1))\n l.add_value('addressRegion', m.group(2))\n l.add_value('postalCode', m.group(3))\n\n #Extract any social media links\n social_media_links = response.xpath('//table[@id=\"ctl00_ctl00_body_maincontentblock_gvSocialMedia\"]//a/ @href').extract()\n for link in social_media_links:\n if 'linkedin.com' in link:\n l.add_value('linkedin', unicode(link))\n elif 'twitter.com' in link:\n l.add_value('twitter', unicode(link))\n elif 'facebook.com' in link:\n l.add_value('facebook', unicode(link))\n\n l.add_value(\"data_uid\", unicode(data_uid))\n l.add_value(\"data_url\", unicode(response.url))\n\n return l.load_item()", "def get_item_dict(self, item):\n item_values = [\n 'item-name', 'current-amount', 'item-price', 'item-cost']\n item_dict = {}\n for value in item_values:\n key = value.split('-')[1]\n item_dict[key] = item.find_element_by_class_name(value)\n item_dict['id'] = item_dict['amount'].get_attribute('data-item_id')\n\n ch_amount = item.find_elements_by_class_name('change-amount')\n for button in ch_amount:\n action = button.get_attribute('data-action')\n item_dict[action] = button\n\n return item_dict", "def _process_group(self):\n if not isinstance(self.transform, GroupTransformModel):\n return\n\n self._process_name()\n\n if self.transformed_item['type'] == 'Campaign':\n self._process_metadata_datetime('firstSeen', self.transform.first_seen)\n\n if self.transformed_item['type'] == 'Document':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('malware', self.transform.malware)\n self._process_metadata('password', self.transform.password)\n\n if self.transformed_item['type'] == 'Email':\n self._process_metadata('body', self.transform.body)\n self._process_metadata('from', self.transform.from_addr)\n self._process_metadata('header', self.transform.header)\n self._process_metadata('subject', self.transform.subject)\n self._process_metadata('to', self.transform.to_addr)\n\n if self.transformed_item['type'] in ('Event', 'Incident'):\n self._process_metadata_datetime('eventDate', self.transform.event_date)\n self._process_metadata('status', self.transform.status)\n\n if self.transformed_item['type'] == 'Report':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata_datetime('publishDate', self.transform.publish_date)\n\n # Handle sig specific fields here\n if self.transformed_item['type'] == 'Signature':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('fileType', self.transform.file_type)\n self._process_metadata('fileText', self.transform.file_text)", "def process_item(self, item, spider):\n try:\n s = self.session()\n if isinstance(item, (PomItem, type(PomItem()), type(PomItem))):\n self.store_pom(item, s)\n elif isinstance(item, (AscItem, type(AscItem()), type(AscItem))):\n self.store_asc(item, s)\n elif isinstance(item, (ArtifactItem, type(ArtifactItem()), type(ArtifactItem))):\n self.store_index(item, s)\n elif isinstance(item, LinkItem):\n pass\n else:\n logger.warning('Unknown item: %s type %s' % 
(item, type(item)))\n return\n\n s.commit()\n s.flush() # writes changes to DB\n s.expunge_all() # removes objects from session\n except Exception as e:\n logger.warning('Exception in storing key %s' % e)\n\n finally:\n utils.silent_close(s)\n s = None\n return item", "def parse(self):\n result = {}\n if self.detail_statu:\n sel = Selector(text=self.driver.page_source)\n\n fact_table = sel.xpath(\n '//div[@class=\"facts-table\"]//text()').extract()\n result['facts'] = [list(i)\n for i in zip(fact_table[:: 2],\n fact_table[1:: 2])]\n\n tax_table = sel.xpath(\n '//div[@class=\"tax-values\"]//text()').extract()\n result['taxs'] = [list(i)\n for i in zip(tax_table[:: 2],\n tax_table[1:: 2])]\n\n listing_detail = sel.xpath(\n '//div[@class=\"amenities-container\"]//text()').extract()\n result['detail'] = listing_detail\n result['page_source'] = self.driver.page_source\n self.detail_statu = False\n else:\n self.log.warning(\n '---- Detail page url out of reach, use .search() first to get the detail page')\n return result", "def parse_items(self, response):\n items = JobcrawlerItem()\n\n current_date = datetime.now()\n current_date_str = current_date.strftime(\"%b %d %Y %H:%M:%S\")\n\n items[\"timestamp\"] = current_date_str\n items[\"site\"] = self.allowed_domains[0]\n items[\"full_html\"] = response.text\n items[\"job_post_url\"] = response.request.url\n items[\"full_text\"] = \" \".join(response.xpath('//div[@id=\"aggelia-text\"]//text()').re('(\\w+)'))\n\n extracted_title = response.xpath('//h3[@id=\"aggelia-title\"]/text()').extract()\n if extracted_title:\n items[\"job_title\"] = extracted_title[0]\n else:\n items[\"job_title\"] = \"\"\n\n job_requirements = response.xpath(self.requirements_xpath).extract()\n requirements_list = list(filter(lambda item: item.strip() != '', job_requirements))\n items[\"job_requirements\"] = \" \".join(requirements_list).replace('\\n', '')\n\n return items", "def _process_dict(data):\n new_dict = {}\n for key in data.keys():\n\tnew_dict['name'] = data['printerName']\n #new_dict[key] = data[key]\n\n #FIGURE OUT AND UPDATE PRINTER STATUS\n status = BUSY_STATUS\n error_msg = \"\"\n if \"FrontPanelMessage\" in data:\n if data[\"FrontPanelMessage\"].lower() in READY_MESSAGES:\n status = READY_STATUS\n elif \"error\" in data[\"FrontPanelMessage\"].lower():\n status = ERROR_STATUS\n error_msg = \"general error\"\n \n if \"TonerStatus\" in data:\n if data[\"TonerStatus\"].find(\"2\") != -1:\n status = ERROR_STATUS\n error_msg = \"Toner Error\"\n #if len(new_dict[\"TonerStatus\"]) > 4:\n #new_dict[\"TonerStatus\"] = new_dict[\"TonerStatus\"][4:]\n\n if \"PaperStatus\" in data:\n if data[\"PaperStatus\"].find(\"2\") != -1:\n status = ERROR_STATUS\n error_msg = \"Paper Status Error\"\n elif data[\"PaperStatus\"].find(\"1\") != -1:\n status = ERROR_STATUS\n error_msg = \"Out of Paper\"\n #if len(new_dict[\"PaperStatus\"]) > 4:\n #new_dict[\"PaperStatus\"] = new_dict[\"PaperStatus\"][4:]\n\n if \"PaperJamStatus\" in data:\n if data[\"PaperJamStatus\"].find(\"1\") != -1:\n status = ERROR_STATUS\n error_msg = \"Paper Jam\"\n #if len(new_dict[\"PaperJamStatus\"]) > 4:\n #new_dict[\"PaperJamStatus\"] = new_dict[\"PaperJamStatus\"][4:]\n\n new_dict[\"status\"] = status\n new_dict[\"error_msg\"] = error_msg\n new_dict[\"location\"] = PRINTERS[new_dict[\"name\"]][0]\n new_dict[\"building_name\"] = PRINTERS[new_dict[\"name\"]][1]\n new_dict[\"latitude\"] = PRINTERS[new_dict[\"name\"]][2]\n new_dict[\"longitude\"] = PRINTERS[new_dict[\"name\"]][3]\n new_dict[\"atResidence\"] = 
PRINTERS[new_dict[\"name\"]][4]\n return new_dict", "def get_report(self):\n self.ensure_one()\n common_log_book_obj = self.env['common.log.book.ept']\n result = {}\n seller = self.seller_id\n if not seller:\n raise UserError(_('Please select seller'))\n if not self.report_id:\n return True\n\n kwargs = self.prepare_amazon_request_report_kwargs(self.seller_id)\n kwargs.update({'emipro_api': 'get_report_v13',\n 'report_id': self.report_id,\n 'amz_report_type': 'vcs_tax_report'})\n response = iap_tools.iap_jsonrpc(DEFAULT_ENDPOINT + '/iap_request', params=kwargs, timeout=1000)\n if response.get('reason'):\n if self._context.get('is_auto_process'):\n common_log_book_obj.create({\n 'type': 'import',\n 'module': 'amazon_ept',\n 'active': True,\n 'log_lines': [\n (0, 0, {'message': 'VCS Report Process ' + response.get('reason')})]\n })\n else:\n raise UserError(_(response.get('reason')))\n else:\n result = response.get('result')\n if result:\n file_name = \"VCS_Tax_report_\" + time.strftime(\"%Y_%m_%d_%H%M%S\") + '.csv'\n attachment = self.env['ir.attachment'].create({\n 'name': file_name,\n 'datas': result.encode(),\n 'res_model': 'mail.compose.message',\n 'type': 'binary'\n })\n self.message_post(body=_(\"<b>VCS Tax Report Downloaded</b>\"),\n attachment_ids=attachment.ids)\n self.write({'attachment_id': attachment.id})\n return True", "def _setData(self):\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n data_list = []\n results = self.query.all()\n \n # if no current parliament, no data\n try:\n parliament_id = model_utils.get_current_parliament().parliament_id\n except: \n return data_list\n #\n government_id = self.__parent__.government_id\n for result in results:\n data = {}\n data[\"qid\"] = \"g_%s\" % (result.group_id)\n data[\"subject\"] = result.short_name\n data[\"title\"] = \"%s (%s)\" % (result.short_name, result.type)\n data[\"result_item_class\"] = \"workflow-state-%s\" % (result.status)\n _url = \"/archive/browse/parliaments/obj-%s\" % (parliament_id)\n if type(result) == domain.Parliament:\n data[\"url\"] = url.set_url_context(_url)\n continue\n elif type(result) == domain.Committee:\n #data[\"url\"] = url + \"/committees/obj-\" + str(result.group_id) \n data[\"url\"] = url.set_url_context(\"/groups/%s/%s\" % (\n result.parent_group.group_principal_id,\n result.group_principal_id))\n elif type(result) == domain.PoliticalGroup:\n data[\"url\"] = url.set_url_context(\n \"%s/politicalgroups/obj-%s\" % (_url, result.group_id))\n elif type(result) == domain.Ministry:\n data[\"url\"] = url.set_url_context(\n \"%s/governments/obj-%s/ministries/obj-%s\" % (\n _url, government_id, result.group_id))\n else:\n data[\"url\"] = \"#\"\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"\"\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list", "def _handle_search_results(self, response: TextResponse) -> ScrapyYelpItem:\n\n # get yConfig\n pattern = re.compile(r\"\"\"\\n\\s+yConfig\\s+=\\s+\"\"\", re.MULTILINE | re.DOTALL)\n soup = BeautifulSoup(response.text, \"html.parser\")\n script = soup.find(\"script\", text=pattern)\n myjson = script.get_text()\n # remove start pattern (js assignment)\n s = re.sub(pattern, '', myjson)\n # remove html (parser problems)\n s = re.sub('<[^<]+?>', '', s)\n # remove last semi colon (end-of-data)\n s = s[0:s.rfind(';')]\n json_object = json.loads(s,strict=False)\n\n keys = [x for x in 
json_object[\"js_display\"][\"hovercard_data\"] if x.isnumeric()]\n # first part is the hovercard data - which contains most of the aggregate biz informative\n # such as total_reviews and summary_score\n df_hovercard_data = pd.DataFrame()\n for x in keys:\n tmpdf = json_normalize(json_object[\"js_display\"][\"hovercard_data\"][x])\n df_hovercard_data = df_hovercard_data.append(tmpdf,ignore_index=True)\n\n df_hovercard_data = df_hovercard_data.set_index(\"result_number\")\n df_hovercard_data.index = df_hovercard_data.index.astype(int)\n # second part is the resourceid which might be useful later on, not sure if this is used at all, but\n # it serves as a good example of how to join to other \"parts\" of the nested json structure and flatten it\n df_markers = json_normalize(json_object[\"js_display\"][\"map_state\"][\"markers\"])\n df_markers = df_markers[df_markers['resourceType'] == 'business'].loc[:, [\"url\",\"resourceId\",\"hovercardId\",\"label\",\"location.latitude\",\"location.longitude\",]]\n df_markers = df_markers.set_index('label')\n df_markers.index = df_markers.index.astype(int)\n\n # combine data into a single dataframe which will eventually be written out by our pipeline\n df = df_hovercard_data.join(df_markers)\n\n # at this point we want to also scrape the indvidual biz listing for the menu, syntax is verbose here\n\n\n ## deubg write to file\n #json_formatted = json.dumps(json_object, indent=2)\n # print(json_formatted)\n # with open(\"files/\"+'blah.json', 'wb') as file:\n # file.write(str.encode(json_formatted))\n\n \"\"\"\n\n Here is a smample of what the yConfig object looks like:\n\n json_object.keys() ====>\n ['cookies', 'gaConfig', 'adjustAndroidPaidTrafficUrl', 'webviewFlow', 'enabledSitRepChannels',\n isWebviewRequest', 'js_display', 'isLoggedIn', 'uaInfo', 'isSitRepEnabled', 'comscore', 'isBugsnagEnabled',\n 'support', 'deprecatedEncryptedYUV', 'vendorExternalURLs', 'smartBannerFallbackActive', 'version',\n 'recaptchaV3PublicKey', 'googlePlacesUrl', 'redesignActive', 'currentBaseLang', 'isClientErrorsEnabled',\n 'uniqueRequestId', 'yelpcodeTemplateVersion', 'appInstallDialogEnabled', 'smartBannerPersistent',\n 'imageUrls', 'siteUrl', 'referrer', 'webviewInfo', 'cookieDomain', 'recaptchaPublicKey',\n 'send_user_agent_to_ga', 'pGifUrl']\n\n\n json_object[\"js_display\"].keys() ===>\n ['polyglot_translations', 'raq_links', 'locale', 'hovercard_data', 'is_first_ad_hovercard_opened',\n 'zoom', 'centerLng', 'map_state', 'advertising_business_id_list', 'centerLat', 'pager']\n\n json_object[\"js_display\"][\"hovercard_data\"] ==>\n '1': {'resource_id': None,\n 'result_number': 1,\n 'biz': {'alias': 'lou-malnatis-pizzeria-chicago',\n 'review_count': 5998,\n 'name': \"Lou Malnati's Pizzeria\",\n 'rating': 4.07785928642881,\n 'url': 'https://m.yelp.com/biz/lou-malnatis-pizzeria-chicago',\n 'price': '$$',\n 'categories': 'Pizza, Italian, Sandwiches',\n 'distance': '2.5 mi'},\n 'lat': 41.890357,\n 'lng': -87.633704,\n 'type': 'natural'},\n '2': {'resource_id': None,\n ....\n\n\n json_object[\"js_display\"][\"map_state\"][\"markers\"] ===>\n [{'resourceType': 'business',\n 'url': '/biz/lou-malnatis-pizzeria-chicago',\n 'resourceId': '8vFJH_paXsMocmEO_KAa3w',\n 'label': '1',\n 'shouldOpenInNewTab': False,\n 'location': {'latitude': 41.890357, 'longitude': -87.633704},\n 'key': 1,\n 'hovercardId': 'Q6nXAEw3UuAVFSztE4lPnA',\n 'icon': {'name': 'business',\n 'anchorOffset': [12, 32],\n 'activeOrigin': [24, 0],\n 'scaledSize': [48, 320],\n 'regularUri': 
'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'size': [24, 32],\n 'activeUri': 'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'regularOrigin': [0, 0]}},\n {'resourceType': 'business',\n 'url': '/biz/pequods-pizzeria-chicago',\n 'resourceId': 'DXwSYgiXqIVNdO9dazel6w',\n 'label': '2',\n 'shouldOpenInNew\n ...\n\n \"\"\"\n #print(json_object[\"js_display\"][\"hovercard_data\"])\n\n\n\n return df", "def parse(self, response):\n item = NewsScraperItem()\n containers = response.xpath(\"//div[contains(@class,'largeTitle')]/article[contains(@class,\"\n \"'articleItem')]/div[contains(@class,'textDiv')]\")\n for info in containers:\n\n try:\n date = info.xpath(\".//div[contains(@class,'articleDetails')]/span[contains(@class,'date')]/text()\").extract_first()\n date = re.sub(r'\\xa0-\\xa0', '', date)\n # Convert 'minutes ago' to datetime\n date = datetime.now() - timedelta(minutes=int(re.sub(r'[^0-9]', '', date))) # Regex = Where not numeric\n item['date'] = date.strftime(\"%Y/%m/%d %H:%M:%S\")\n earn_id = re.search(r'[0-9]{4,}', info.xpath(\".//a/@onclick\").extract_first())\n item['id'] = earn_id.group()\n item['title'] = info.xpath(\".//a/text()\").extract_first()\n item['author'] = info.xpath(\".//div[contains(@class,'articleDetails')]/span/text()\").extract_first()\n item['text'] = info.xpath(\".//p/text()\").extract_first()\n item['link'] = info.xpath(\".//a/@href\").extract_first()\n yield item\n\n except:\n print(\"Unusual format detected\")\n logging.warning(\"Item skipped due to unusual format\")", "def process_item(self, item, spider):\n\n # strip non ascii chars\n item['raw_content'] = filter(lambda x : ord(x) < 128, item['raw_content'])\n #item['raw_content'] = ''.join(c for c in item['raw_content'] if ord(c) < 128)\n\n # hash the filename to prevent storing too-long file names\n hash_data = item['filename'] + item['user_agent'].ua_string\n filename = sha1(hash_data).hexdigest()\n\n # Javascript MIME types\n js_mimes = ('text/javascript',\n 'application/x-javascript',\n 'application/javascript')\n\n # Parse each file based on what its MIME specifies\n if 'text/html' == item['content_type']:\n # First save the request contents into a URLContent\n urlcontent,_ = model.URLContent.objects.get_or_create(\n url_scan=item['urlscan'],\n user_agent=item['user_agent'],\n defaults={'redirected_from':item['redirected_from']})\n\n # Store raw markup\n file_content = ContentFile(item['raw_content'])\n urlcontent.raw_markup.save(filename, file_content)\n urlcontent.raw_markup.close()\n\n # Store raw headers\n file_content = ContentFile(item['headers'])\n urlcontent.headers.save(filename, file_content)\n urlcontent.headers.close()\n\n urlcontent.save()\n\n elif any(mime == item['content_type'] for mime in js_mimes):\n urlcontent = model.URLContent.objects.get(\n url_scan=item['urlscan'],\n user_agent=item['user_agent'])\n\n linkedjs, _ = model.LinkedJS.objects.get_or_create(\n batch=spider.batch,\n url_hash=sha256(item['url']).hexdigest(),\n defaults={'url': item['url']},\n )\n\n # Store raw js\n file_content = ContentFile(item['raw_content'])\n linkedjs.raw_js.save(filename, file_content)\n linkedjs.raw_js.close()\n\n linkedjs.save()\n\n # Create relationship with url content\n linkedjs.linked_from.add(urlcontent)\n\n elif 'text/css' == item['content_type']:\n urlcontent = model.URLContent.objects.get(\n url_scan=item['urlscan'],\n user_agent=item['user_agent'])\n\n linkedcss, created = model.LinkedCSS.objects.get_or_create(\n batch = 
spider.batch,\n url_hash=sha256(item['url']).hexdigest(),\n defaults={\n 'url': item['url'],\n },\n )\n\n # Store raw css\n file_content = ContentFile(item['raw_content'])\n linkedcss.raw_css.save(filename, file_content)\n linkedcss.raw_css.close()\n\n linkedcss.save()\n\n # Create relationship with url content\n linkedcss.linked_from.add(urlcontent)\n\n if created:\n # Parse out rules and properties\n use_celery = getattr(settings, 'USE_CELERY', False)\n if use_celery:\n parse_css.delay(linkedcss)\n else:\n spider.log(\"Parsing css {0}\".format(linkedcss))\n self.css_parser.parse(linkedcss)\n spider.log(\"Ended parsing css {0}\".format(linkedcss))\n\n return item", "def run(self):\n for item_name in self.item_names:\n item = self.conn.get_attributes(self.domain_name, item_name)\n self.items.append(item)", "def parse_reports(self):\n txt = (\n self.unixtext\n if self.unixtext[:2] != \"\\001\\n\"\n else self.unixtext[2:]\n )\n\n lines = txt.split(\"\\n\")\n # There may be an AWIPSID in line 3 or silly aviation control char\n pos = 3 if len(lines[2]) < 10 or lines[2].startswith(\"\\x1e\") else 2\n meat = \"\".join(lines[pos:])\n for report in meat.split(\"=\"):\n if report.strip() == \"\":\n continue\n res = self.process_pirep(\" \".join(report.strip().split()))\n if res is not None:\n self.reports.append(res)", "def _process_agenda_item(self, agenda_item_data, agenda_item_id, meeting_id, meeting_time):\n pass", "def run(self):\r\n for item_name in self.item_names:\r\n item = self.conn.get_attributes(self.domain_name, item_name)\r\n self.items.append(item)", "def _parse_audit_items(self, items, function_name):\n for item in items:\n yield {\n \"snippet\": item[\"node\"][\"snippet\"],\n \"selector\": item[\"node\"][\"selector\"],\n \"colors\": self._extract_hex_codes(item[\"node\"][\"explanation\"]),\n \"pipeline\": [function_name],\n # path is in the format \"1,HTML,1,BODY,0,DIV,...\"\n # we only need to keep the numbers (as integers)\n \"path\": tuple(int(i) for i in item[\"node\"][\"path\"].split(\",\")[::2]),\n }", "def _parse_documents(self, item):\n documents = []\n agenda_url = item.css('a[href*=Agenda]::attr(href)').extract_first()\n if agenda_url:\n documents.append({'url': agenda_url, 'note': 'Agenda'})\n minutes_url = item.css('a[href*=Minutes]::attr(href)').extract_first()\n if minutes_url:\n documents.append({'url': minutes_url, 'note': 'Minutes'})\n video_url = item.css('td[headers~=VideoLink] a::attr(onclick)').extract_first()\n video_url_match = re.search(r'http.*(?=\\',\\'p)', video_url or '')\n if video_url and video_url_match:\n documents.append({'url': video_url_match.group(), 'note': 'Video'})\n return documents", "def getItems(self):\n fname = 'getItems'\n actionId = self._db.addAction('WebCrawler')\n actionId_ex = self._db.addAction('extractor')\n\n if not os.path.exists(self._haystackPath):\n self._haystackPath = os.path.expanduser(self._haystackPath)\n\n if not os.path.exists(self._haystackPath):\n self._haystackPath = os.path.abspath(self._haystackPath)\n\n print('\\t{0} [{1}]'.format(fname, self._haystackPath))\n\n for (pathStr, dirs, files) in os.walk(self._haystackPath):\n head, tail = os.path.split(pathStr)\n for fileStr in files:\n fileDTCheck = ''\n filePath = os.path.join(pathStr,fileStr)\n\n # get the file date...\n fileDT = datetime.datetime.fromtimestamp(os.path.getmtime(filePath)).replace(microsecond=0)\n fileSize = os.path.getsize(filePath)\n fileName, fileExt = os.path.splitext(filePath)\n\n # save the item to the database\n itemId = 
self._db.addItem(self._engine_id, \"file://%s\" % filePath, fileDT)\n \n # now check the data for this item...\n itemList = self._db.getItemDataAll(itemId) \n isMatch = False\n for item in itemList:\n if item[0] == 'FileDate':\n # we have a date string...\n fileDTCheck = datetime.datetime.strptime(item[1], \"%Y-%m-%d %H:%M:%S\")\n if fileDTCheck == fileDT:\n # the same time, no changes needed\n isMatch = True\n \n if isMatch:\n # get next item as this is already exists\n continue\n \n # print(the details)\n print(fileDTCheck, fileDT)\n print('>>\\t%s\\t%s\\t%s' % (fname, head, tail))\n \n # set the datetime and other details\n self._db.addItemData(itemId, 'Haystack', tail, 0)\n self._db.addItemData(itemId, 'FileName', fileName, 0)\n self._db.addItemData(itemId, 'FileExt', fileExt, 0)\n self._db.addItemData(itemId, 'FileDate', fileDT, 0)\n self._db.addItemData(itemId, 'FileSize', fileSize, 0)\n\n # now to process the file...\n # this will extract out metadata and add to the itemData table the value pairs.\n pattern = re.compile(r'^.*[.](?P<ext>htm|html)$')\n pattPNG = re.compile(r'^.*[.](?P<ext>mp.|mpeg|avi|swf|jpg|jpeg|png)$')\n pattTAR = re.compile(r'^.*[.](?P<ext>tar\\.gz|tar\\.bz2|\\.zip|\\.tar|\\.7z)$')\n\n m = pattern.match(filePath)\n if not m:\n m = pattPNG.match(filePath)\n\n if not m:\n m = pattTAR.match(filePath)\n\n if not m:\n self.getContents(itemId, filePath, tail)\n self._db.updateItem(self._engine_id, itemId, actionId_ex, datetime.datetime.now())\n\n else:\n # we have a file extension...\n if m.group('ext').startswith('.htm'):\n # add this as an event to be processed by the html link reader...\n self._db.addItemEvent(self._engine_id, actionId, itemId)\n\n if self._db:\n self._db.commit_db()", "def _set_meta_info(self):\n self._report_data['environment'] = f'{self._get_environment()}'.lstrip()\n self._report_data['meta_account_id'] = self._account_id\n if self._account_name:\n self._report_data['meta_account_name'] = self._account_name\n\n # Get source ???\n # Appears in the Description section of the PDF Document Properties as Title.\n self._report_data['meta_title'] = ReportMeta.reports[self._report_key]['metaTitle'].upper()\n self._report_data['meta_subtitle'] = ReportMeta.reports[self._report_key]['metaSubtitle']\n\n # Appears in the Description section of the PDF Document Properties as Subject.\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT,\n ReportTypes.SEARCH_TOC_REPORT,\n ReportTypes.SEARCH_BODY_REPORT):\n search_type: str = self._report_data['searchQuery']['type']\n search_desc: str = TO_SEARCH_DESCRIPTION[search_type]\n criteria: str = ''\n if search_type == 'OWNER_NAME':\n criteria = self._report_data['searchQuery']['criteria']['ownerName']['last'] + ', '\n criteria += self._report_data['searchQuery']['criteria']['ownerName']['first']\n if 'middle' in self._report_data['searchQuery']['criteria']['ownerName']:\n criteria += ' ' + self._report_data['searchQuery']['criteria']['ownerName']['middle']\n else:\n criteria = self._report_data['searchQuery']['criteria']['value'].upper()\n self._report_data['meta_subject'] = f'{search_desc} - \"{criteria}\"'\n if search_type == 'MHR_NUMBER':\n self._report_data['footer_content'] = f'MHR Number Search - \"{criteria}\"'\n else:\n self._report_data['footer_content'] = f'MHR {search_desc} Search - \"{criteria}\"'\n elif self._report_key in (ReportTypes.MHR_REGISTRATION, ReportTypes.MHR_COVER,\n ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_NOTE,\n ReportTypes.MHR_TRANSPORT_PERMIT, 
ReportTypes.MHR_REGISTRATION_COVER):\n reg_num = self._report_data.get('mhrNumber', '')\n self._report_data['footer_content'] = f'Manufactured Home Registration #{reg_num}'\n self._report_data['meta_subject'] = f'Manufactured Home Registration Number: {reg_num}'\n if self._get_environment() != '':\n self._report_data['footer_content'] = 'TEST DATA | ' + self._report_data['footer_content']", "def parse(self, response):\n page_jobs=[]\n\n # Calling abstarct method get_jobs_list() and iterating...\n jobs_div_list=self.get_jobs_list(response)\n for div in jobs_div_list:\n \n # Calling abstarct method get_job_dict()\n job_dict=self.get_job_dict(div)\n\n if not job_dict['url'] or not job_dict['title'] :\n # At least url, title data is loaded from the list of job posting ...\n raise ValueError( \"Could not find valid job information ('url' and 'title') in data:\\n\" + \n str(div.get()) + \"\\nScraped infos:\\n\" + str(job_dict) + \"\\nReport this issue on github!\" )\n \n # Store source as the name of the spider aka website\n job_dict['source']=self.name\n page_jobs.append(job_dict)\n \n \"\"\"\n Load full job page only if:\n - it's a new job (not in database)\n - load_full_jobs=Yes\n - the method parse_full_job_page() has been re-wrote by the Scraper subclass\n \"\"\"\n if ( (not self.db or self.db.find_job(job_dict)==None)\n and self.load_full_jobs ):\n if type(self).parse_full_job_page != Scraper.parse_full_job_page:\n # load_full_jobs=Yes and it's supported by scraper\n # Call parse_full_job_page() with job URL\n\n # Handle SeleniumRequest if use_selenium=True\n if self.use_selenium:\n yield SeleniumRequest(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict),\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict))\n else:\n yield Job(job_dict)\n else:\n yield Job(job_dict)\n\n \"\"\" Just printing in one line \"\"\"\n if self.load_full_jobs:\n if type(self).parse_full_job_page == Scraper.parse_full_job_page:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True and load_all_new_pages=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True, some informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraping {} jobs from {}...\".format(len(page_jobs), response.url))\n else:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. load_all_new_pages=False and load_full_jobs=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url))\n else:\n self.log.info(\"Scraped {} jobs from {}. 
load_full_jobs=False, some informations might be missing\".format(len(page_jobs), response.url))\n \n \"\"\"\n If all page jobs are new and \n The method get_next_page_url() has been re-wrote by the Scraper subclass\n Scrape next page\n \"\"\"\n if self.load_all_new_pages==True:\n if self.db and any( [self.db.find_job(job_dict)!=None for job_dict in page_jobs] ):\n # All new job postings loaded\n pass\n else:\n if self.get_next_page_url(response)!=None :\n # Loading next page...\n if self.use_selenium:\n yield SeleniumRequest(\n url=self.get_next_page_url(response),\n callback=self.parse,\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(\n url=self.get_next_page_url(response),\n callback=self.parse)\n else:\n if type(self).get_next_page_url != Scraper.get_next_page_url:\n # Last page loaded\n pass\n else:\n self.log.info(\"Scraper {} does not support load_all_new_pages=True, some new job postings might be missing\".format(self.name))", "async def process_reports(self):\n await self.aggregate_weights(self.updates)\n\n # Testing the global model accuracy\n if Config().clients.do_test:\n # Compute the average accuracy from client reports\n self.average_accuracy = self.accuracy_averaging(self.updates)\n logging.info(\n '[Server #{:d}] Average client accuracy: {:.2f}%.'.format(\n os.getpid(), 100 * self.average_accuracy))\n\n if hasattr(Config().server, 'do_test'):\n if not Config().clients.do_test or Config().server.do_test:\n # Test the updated model directly at the server\n self.accuracy = self.trainer.test(self.testset)\n logging.info(\n '[Server #{:d}] Global model accuracy: {:.2f}%\\n'.format(\n os.getpid(), 100 * self.accuracy))\n else:\n self.accuracy = self.average_accuracy\n\n await self.wrap_up_processing_reports()", "def processSearchResult(self):", "def process_item(self, item, spider):\n try:\n\n url_md5 = md5(item['url'])\n # self.r_conn.set(url_md5, html_body.read())\n # item['html_body'] = None\n\n sqli = \"insert into spider_content values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n news = {'content': item}\n\n if item['From'] == '0':\n # self.mysqlop.execute(\"insert into spider_content values('url_md5')\")\n # self.mysqlop.execute(sqli, (url_md5, None, item['spider_name'], item['catch_date'],\n # item['From'], item['url'], item['title'].encode('utf-8'), item['summary'].encode('utf-8'), item['site_url'],\n # None, None, None, item['site_name'].encode('utf-8'), None))\n\n self.db.emergency.insert(news)\n elif item['From'] == '1' or item['From'] == '3':\n # self.mysqlop.execute(sqli, (url_md5, item['publish_time'], item['spider_name'], item['catch_date'],\n # item['From'], item['url'], item['title'].encode('utf-8'), item['summary'].encode('utf-8'), item['site_url'],\n # None, None, None, item['site_name'].encode('utf-8'), None))\n\n self.db.news.insert(news)\n elif item['From'] == '2':\n\n # self.mysqlop.execute(sqli, (url_md5, item['publish_time'], item['spider_name'], item['catch_date'],\n # item['From'], item['url'], item['title'].encode('utf-8'), item['summary'].encode('utf-8'), item['site_url'],\n # item['author'].encode('utf-8'), item['replay_times'], item['view_times'], item['site_name'].encode('utf-8'), None))\n self.db.bbs.insert(news)\n\n except Exception, e:\n print 'pipeline error', e", "def item_dict():\n\n items = {'page': 'pages', 'table': 'tables',\n 'viz': 'vizualisation', 'column': 'columns'}\n return items", "def scrape(self):\n\n #Get page\n soup, _ = getPage(self.url)\n\n #Check page was found\n if soup is None:\n 
self.na = True\n return\n\n #Find price\n try:\n self.price = soup.find(class_=\"user-ad-price__price\").get_text()\n except:\n pass\n\n #Find attributes names/values\n adAttrVals = soup.find_all(class_=\"vip-ad-attributes__name\")\n adAttrName = soup.find_all(class_=\"vip-ad-attributes__value\")\n #Find description\n try:\n self.description = soup.find(class_=\"vip-ad-description__content--wrapped\").get_text()\n except:\n pass\n\n #Check all attributes for important information\n for i in range(0,len(adAttrName)):\n tempName = adAttrName[i].get_text()\n tempVal = adAttrVals[i].get_text()\n if \"Date Listed:\" in tempName:\n #Can be date or words (eg 16 minutes ago, yesterday)\n try:\n #Will work if date\n listDateLst = tempVal.lstrip().split('/')\n self.listDate = listDateLst[2]+'-'+listDateLst[1]+'-'+listDateLst[0]\n except:\n #Check not empty\n if tempVal is not None:\n if tempVal == \"Yesterday\":\n #Yesterday\n self.listDate = (datetime.today() - timedelta(days=1)).strftime('%Y-%m-%d')\n else:\n #Either hours or minutes\n self.listDate = datetime.today().strftime('%Y-%m-%d')\n \n elif \"Displacement (cc):\" in tempName:\n self.displacement = tempVal.lstrip()\n elif \"Make:\" in tempName:\n self.make = tempVal.lstrip()\n elif \"Model:\" in tempName:\n self.model = tempVal.lstrip()\n elif \"Year:\" in tempName:\n self.year = tempVal.lstrip()\n elif \"KMs:\" in tempName:\n self.kms = tempVal.lstrip()\n elif \"Registered:\" in tempName:\n if tempVal.lstrip() == \"Yes\":\n self.registered = \"Y\"\n elif tempVal.lstrip() == \"No\":\n self.registered = \"N\"\n elif \"Registration Expiry:\" in tempName:\n regExpLst = tempVal.lstrip().split('/')\n self.regExpiry = regExpLst[2]+'-'+regExpLst[1]+'-'+regExpLst[0]\n elif \"Colour:\" in tempName:\n self.colour = tempVal.lstrip()\n elif \"Learner Approved:\" in tempName:\n if tempVal.lstrip() == \"Yes\":\n self.learner = \"Y\"\n elif tempVal.lstrip() == \"No\":\n self.learner = \"N\"\n elif \"Listing Type:\" in tempName:\n self.listType = tempVal.lstrip()", "def display_item_process(self):\n raise NotImplementedError()", "def scrape(self):\n pass", "def after_parse(self, response):\n\n extraction_requests = []\n\n for container in response.xpath('//tr[@align=\"center\"]'):\n detail_url = container.xpath('./td[1]/a/ @href').extract()[0]\n\n l = BusinessLoader(selector=container, response=response)\n l.add_xpath('telephone', './td[1]/span/ text()')\n l.add_xpath('website', './td[2]/a/ @href')\n l.add_xpath('email', \"substring-after(./td[4]/a/ @href,'mailto:')\")\n l.add_xpath('legalName', './td[1]/a/ text()')\n item = l.load_item()\n\n log.msg('business details extracted from index: {0}'.format(item))\n\n extraction_requests.append(Request(url = urljoin(response.url, detail_url), meta={'item':item}, callback=self.extract))\n\n return extraction_requests", "def _process(self):\n export_collect_medias(self.kwargs[\"collect\"])", "def prepare(self):\n for scenario_result, scenario_pass, case_pass in self.iterate():\n for step_result in scenario_result.step_results:\n step_pass = step_result.success\n url, method = step_result.fetch.url, step_result.fetch.method\n params = step_result.fetch.kwargs.get(\"params\")\n method_report = self.get_method_report(url, method)\n if method_report:\n method_report.add(\n case_pass, scenario_pass, step_pass, params\n )", "def scrap(dic: dict, nb: int):\n name_col = ['Data_Source', 'Airline_Name', 'Airline_Type', 'Region_Operation', 'Aircraft_Type', 'Cabin_Class', 'Type_Of_Lounge',\n 'Type_Of_Traveller', 'Date_Visit', 
'Date_Flown', 'Airport', 'Route', 'Category', 'Category_Detail',\n 'Cabin_Staff_Service', 'Lounge_Staff_Service', 'Bar_And_Beverages', 'Food_And_Beverages', 'Ground_Service', 'Catering', 'Cleanliness',\n 'Lounge_Comfort', 'Aisle_Space', 'Wifi_And_Connectivity', 'Inflight_Entertainment', 'Viewing_Tv_Screen', 'Power_Supply',\n 'Seat', 'Seat_type', 'Seat_Comfort', 'Seat_Legroom', 'Seat_Storage', 'Seat_Width', 'Seat_Recline', 'Washrooms',\n 'Value_For_Money', 'Overall_Customer_Rating', 'Overall_Service_Rating', 'Overall_Airline_Rating',\n 'Recommended', 'Departure_city', 'Arrival_city', 'Nb_bus_taken', 'Nb_train_taken',\n 'Nb_car_taken', 'Nb_plane_taken', 'Duration', 'Price_min', 'Price_max', 'Nb_sharing', 'Awards', 'Registration', 'Language',\n 'Queuing Times', 'Terminal_Seating', 'Terminal Signs', 'Airport_Shopping', 'Experience_At_Airport', 'Date_Review']\n\n dataAirline = pd.DataFrame(columns=name_col)\n\n for dic_key, dic_val in dic.items():\n r = requests.get(dic_val)\n page = r.text\n soup = bs(page, 'html.parser')\n nb_page = Nb_pages(soup)\n\n for j in range(1, nb_page+1):\n r = requests.get(dic_val + '/page/' + str(j) + '/')\n page = r.text\n soup = bs(page, 'html.parser')\n\n Date_Review = dateReview(soup, nb)\n title = title_comm(soup, nb)\n desc = description(soup, nb)\n note = UserNot(soup, nb)\n notGlo = NoteGlobal(soup, nb)\n\n airport = []\n source = []\n\n for i in range(0, len(desc)):\n airport.append(dic_key)\n source.append('AirlineQuality')\n\n df = pd.DataFrame(data=[title, desc, note, airport])\n df = df.transpose()\n\n Title = df[0]\n Review = df[1]\n Date_Visit, Terminal_Cleanliness, Food_Beverages, Wifi_Connectivity, Airport_Staff, Recommended, Type_Of_Traveller, Queuing_Times, Terminal_Seating, Terminal_Signs, Airport_Shopping, Experience_At_Airport = transformColInDic(\n df[2])\n Airport = df[3]\n\n df_template = pd.DataFrame({'Data_Source': source, 'Date_Flown': Date_Visit, 'Cleanliness': Terminal_Cleanliness, 'Food_And_Beverages': Food_Beverages,\n 'Wifi_And_Connectivity': Wifi_Connectivity, 'Cabin_Staff_Service': Airport_Staff, 'Overall_Customer_Rating': notGlo,\n 'Recommended': Recommended, 'Title': Title, 'Review': Review, 'Airport': Airport, 'Type_Of_Traveller': Type_Of_Traveller,\n 'Queuing_Times': Queuing_Times, 'Terminal_Seating': Terminal_Seating, 'Terminal_Signs': Terminal_Signs,\n 'Airport_Shopping': Airport_Shopping, 'Experience_At_Airport': Experience_At_Airport, 'Date_Review': Date_Review})\n\n dataAirline = pd.concat([dataAirline, df_template])\n\n return dataAirline", "def parse_items(self, response: Response) -> RlItem:\n self.logger.info('Crawler Found Item Page: %s', response.url)\n\n # Iterate through each rocket league item and build it.\n for elem_item in response.xpath('//div[starts-with(@class, \"rlg-item__container\")]'):\n loader = RlItemLoader(item=RlItem(), selector=elem_item)\n loader.add_xpath('data_id', './/div/@data-id')\n loader.add_xpath('img_url', './/img/@src')\n loader.add_value('name', elem_item.attrib['data-name'])\n loader.add_value('category', elem_item.attrib['data-category'])\n loader.add_value('platform', elem_item.attrib['data-platform'])\n loader.add_value('rarity', elem_item.attrib['data-rarity'])\n loader.add_value('dlcpack', elem_item.attrib['data-dlcpack'])\n yield loader.load_item()", "def _scrape_agenda_item(self, agenda_item_location):\n pass", "def _get_item_info(self, response):\n item_info = {\"keys\":[], \"values\":[]}\n for selector_action in self.item_selector.selectors_actions:\n if 
isinstance(selector_action, KeyValueSelector):\n # keys can be either strings or selectors. For the latter, obtain the key from the page\n key_selector = selector_action.key_selector\n if isinstance(key_selector, FieldSelector): #key_selector is a FieldSelector, use it to get the key from the response\n sel = Selector(response)\n if key_selector.type == FieldSelector.XPATH:\n key = sel.xpath(key_selector).extract()\n elif key_selector.type == FieldSelector.CSS:\n key = sel.css(key_selector).extract()\n if key: key = key[0]\n else: key = \"Invalid_Key_Selector\" #this may pack in all values with invalid keys with this key.\n else: \n key = key_selector\n value_selector = selector_action.value_selector\n item_info[\"keys\"].append(key)\n item_info[\"values\"].append(value_selector)\n return item_info" ]
[ "0.6547524", "0.6395213", "0.619723", "0.6070499", "0.6036251", "0.6004193", "0.59119064", "0.58983403", "0.58883923", "0.5818666", "0.5750651", "0.5722851", "0.5683043", "0.5669266", "0.56607234", "0.5656393", "0.56268835", "0.5572283", "0.5531241", "0.551983", "0.5516696", "0.54642683", "0.545876", "0.545876", "0.5458145", "0.5446324", "0.5442388", "0.5408472", "0.5389968", "0.5376792", "0.53672725", "0.5364257", "0.5350885", "0.5350443", "0.53387016", "0.5337003", "0.5322952", "0.5312816", "0.5307892", "0.5304615", "0.53010434", "0.52980137", "0.52980137", "0.5290555", "0.5285792", "0.52821904", "0.52813727", "0.5278933", "0.5278711", "0.527743", "0.52685106", "0.52566904", "0.5250725", "0.5248543", "0.52459085", "0.52408427", "0.52395654", "0.52323645", "0.52307975", "0.5207514", "0.51913214", "0.5190577", "0.5187712", "0.51859766", "0.5181897", "0.51667935", "0.5166295", "0.51597244", "0.5153753", "0.5148439", "0.51443624", "0.51351684", "0.5129006", "0.51189345", "0.5114169", "0.5108928", "0.50947946", "0.5089839", "0.5089144", "0.50861394", "0.50851923", "0.5084256", "0.5082719", "0.50770247", "0.5072545", "0.5071674", "0.5063464", "0.5062395", "0.5059421", "0.5052119", "0.50440377", "0.50404197", "0.5038035", "0.503125", "0.50310117", "0.502112", "0.5019321", "0.5011119", "0.5009268", "0.50059795" ]
0.5546761
18
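The three bare values that close the record above read as the negatives' similarity scores, the positive document's score (0.5546761), and its rank (18): exactly 18 of the listed scores exceed 0.5546761, so the rank appears to count the negatives that outscore the document. A minimal sketch of that consistency check, with hypothetical variable names and the score list truncated to the entries shown above:

# Hedged sketch: hypothetical field names for the three values closing the record.
negative_scores = [
    0.6547524, 0.6395213, 0.619723, 0.6070499, 0.6036251, 0.6004193,
    0.59119064, 0.58983403, 0.58883923, 0.5818666, 0.5750651, 0.5722851,
    0.5683043, 0.5669266, 0.56607234, 0.5656393, 0.56268835, 0.5572283,
    0.5531241,  # remaining entries in the record are all lower, so they do not affect the count
]
document_score = 0.5546761

# Rank interpreted as the number of negatives scoring above the positive document.
document_rank = sum(score > document_score for score in negative_scores)
print(document_rank)  # 18, matching the value recorded above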
Given inputs, take move and return outputs.
def move(self, env2): output = dict() if self.state == 0: if (env2 == 0): self.state = 0 output["loc"] = 16 output["stage"] = 1 elif (env2 == 1): self.state = 33 output["loc"] = 17 output["stage"] = 1 elif (env2 == 2): self.state = 34 output["loc"] = 17 output["stage"] = 1 else: self._error(env2) elif self.state == 1: if (env2 == 0): self.state = 0 output["loc"] = 16 output["stage"] = 1 elif (env2 == 1): self.state = 33 output["loc"] = 17 output["stage"] = 1 elif (env2 == 2): self.state = 34 output["loc"] = 17 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 2: if (env2 == 0): self.state = 0 output["loc"] = 16 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 2): self.state = 34 output["loc"] = 17 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 1): self.state = 33 output["loc"] = 17 output["stage"] = 1 else: self._error(env2) elif self.state == 3: if (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 2): self.state = 2 output["loc"] = 16 output["stage"] = 1 elif (env2 == 1): self.state = 1 output["loc"] = 16 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 4: if (env2 == 6): self.state = 40 output["loc"] = 16 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 2): self.state = 2 output["loc"] = 16 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 5: if (env2 == 6): self.state = 40 output["loc"] = 16 output["stage"] = 1 elif (env2 == 7): self.state = 5 output["loc"] = 16 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 6: if (env2 == 0): self.state = 8 output["loc"] = 20 output["stage"] = 1 elif (env2 == 1): self.state = 18 output["loc"] = 21 output["stage"] = 1 elif (env2 == 2): self.state = 19 output["loc"] = 21 output["stage"] = 1 elif (env2 == 3): self.state = 21 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 7: if (env2 == 0): self.state = 8 output["loc"] = 20 output["stage"] = 1 elif (env2 == 4): self.state = 24 output["loc"] = 20 output["stage"] = 1 elif (env2 == 1): self.state = 18 output["loc"] = 21 output["stage"] = 1 elif (env2 == 2): self.state = 19 output["loc"] = 21 output["stage"] = 1 elif (env2 == 3): self.state = 21 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 8: if (env2 == 0): self.state = 8 output["loc"] = 20 output["stage"] = 1 elif (env2 == 1): self.state = 18 output["loc"] = 21 output["stage"] = 1 elif (env2 == 2): self.state = 19 output["loc"] = 21 output["stage"] = 1 else: self._error(env2) elif self.state == 9: if (env2 == 4): self.state = 24 output["loc"] = 20 output["stage"] = 1 elif (env2 == 7): self.state = 20 output["loc"] = 20 output["stage"] = 1 elif (env2 == 5): self.state = 22 output["loc"] = 20 output["stage"] = 1 elif (env2 == 6): self.state = 23 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 10: if (env2 == 7): self.state = 10 output["loc"] = 20 output["stage"] = 2 
elif (env2 == 6): self.state = 11 output["loc"] = 20 output["stage"] = 2 elif (env2 == 5): self.state = 12 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 11: if (env2 == 7): self.state = 10 output["loc"] = 20 output["stage"] = 2 elif (env2 == 6): self.state = 11 output["loc"] = 20 output["stage"] = 2 elif (env2 == 5): self.state = 12 output["loc"] = 20 output["stage"] = 2 elif (env2 == 4): self.state = 13 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 12: if (env2 == 7): self.state = 10 output["loc"] = 20 output["stage"] = 2 elif (env2 == 6): self.state = 11 output["loc"] = 20 output["stage"] = 2 elif (env2 == 5): self.state = 12 output["loc"] = 20 output["stage"] = 2 elif (env2 == 4): self.state = 13 output["loc"] = 20 output["stage"] = 2 elif (env2 == 3): self.state = 14 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 13: if (env2 == 2): self.state = 16 output["loc"] = 20 output["stage"] = 2 elif (env2 == 6): self.state = 11 output["loc"] = 20 output["stage"] = 2 elif (env2 == 5): self.state = 12 output["loc"] = 20 output["stage"] = 2 elif (env2 == 4): self.state = 13 output["loc"] = 20 output["stage"] = 2 elif (env2 == 3): self.state = 14 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 14: if (env2 == 2): self.state = 16 output["loc"] = 20 output["stage"] = 2 elif (env2 == 5): self.state = 12 output["loc"] = 20 output["stage"] = 2 elif (env2 == 4): self.state = 13 output["loc"] = 20 output["stage"] = 2 elif (env2 == 3): self.state = 14 output["loc"] = 20 output["stage"] = 2 elif (env2 == 1): self.state = 15 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 15: if (env2 == 2): self.state = 16 output["loc"] = 20 output["stage"] = 2 elif (env2 == 0): self.state = 17 output["loc"] = 20 output["stage"] = 2 elif (env2 == 3): self.state = 14 output["loc"] = 20 output["stage"] = 2 elif (env2 == 1): self.state = 15 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 16: if (env2 == 2): self.state = 16 output["loc"] = 20 output["stage"] = 2 elif (env2 == 0): self.state = 17 output["loc"] = 20 output["stage"] = 2 elif (env2 == 4): self.state = 13 output["loc"] = 20 output["stage"] = 2 elif (env2 == 3): self.state = 14 output["loc"] = 20 output["stage"] = 2 elif (env2 == 1): self.state = 15 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 17: if (env2 == 2): self.state = 16 output["loc"] = 20 output["stage"] = 2 elif (env2 == 0): self.state = 17 output["loc"] = 20 output["stage"] = 2 elif (env2 == 1): self.state = 15 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 18: if (env2 == 2): self.state = 16 output["loc"] = 20 output["stage"] = 2 elif (env2 == 0): self.state = 17 output["loc"] = 20 output["stage"] = 2 elif (env2 == 3): self.state = 14 output["loc"] = 20 output["stage"] = 2 elif (env2 == 1): self.state = 15 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 19: if (env2 == 2): self.state = 16 output["loc"] = 20 output["stage"] = 2 elif (env2 == 0): self.state = 17 output["loc"] = 20 output["stage"] = 2 elif (env2 == 4): self.state = 13 output["loc"] = 20 output["stage"] = 2 elif (env2 == 3): self.state = 14 output["loc"] = 20 output["stage"] = 2 elif (env2 == 1): self.state = 15 output["loc"] = 20 output["stage"] = 2 else: self._error(env2) elif self.state == 20: if (env2 == 7): self.state = 20 
output["loc"] = 20 output["stage"] = 1 elif (env2 == 5): self.state = 22 output["loc"] = 20 output["stage"] = 1 elif (env2 == 6): self.state = 23 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 21: if (env2 == 4): self.state = 24 output["loc"] = 20 output["stage"] = 1 elif (env2 == 1): self.state = 18 output["loc"] = 21 output["stage"] = 1 elif (env2 == 2): self.state = 19 output["loc"] = 21 output["stage"] = 1 elif (env2 == 3): self.state = 21 output["loc"] = 20 output["stage"] = 1 elif (env2 == 5): self.state = 22 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 22: if (env2 == 4): self.state = 24 output["loc"] = 20 output["stage"] = 1 elif (env2 == 7): self.state = 20 output["loc"] = 20 output["stage"] = 1 elif (env2 == 3): self.state = 21 output["loc"] = 20 output["stage"] = 1 elif (env2 == 5): self.state = 22 output["loc"] = 20 output["stage"] = 1 elif (env2 == 6): self.state = 23 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 23: if (env2 == 4): self.state = 24 output["loc"] = 20 output["stage"] = 1 elif (env2 == 7): self.state = 20 output["loc"] = 20 output["stage"] = 1 elif (env2 == 5): self.state = 22 output["loc"] = 20 output["stage"] = 1 elif (env2 == 6): self.state = 23 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 24: if (env2 == 4): self.state = 24 output["loc"] = 20 output["stage"] = 1 elif (env2 == 2): self.state = 19 output["loc"] = 21 output["stage"] = 1 elif (env2 == 3): self.state = 21 output["loc"] = 20 output["stage"] = 1 elif (env2 == 5): self.state = 22 output["loc"] = 20 output["stage"] = 1 elif (env2 == 6): self.state = 23 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 25: if (env2 == 4): self.state = 24 output["loc"] = 20 output["stage"] = 1 elif (env2 == 7): self.state = 20 output["loc"] = 20 output["stage"] = 1 elif (env2 == 3): self.state = 21 output["loc"] = 20 output["stage"] = 1 elif (env2 == 5): self.state = 22 output["loc"] = 20 output["stage"] = 1 elif (env2 == 6): self.state = 23 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 26: if (env2 == 6): self.state = 9 output["loc"] = 19 output["stage"] = 1 elif (env2 == 4): self.state = 26 output["loc"] = 18 output["stage"] = 1 elif (env2 == 3): self.state = 27 output["loc"] = 18 output["stage"] = 1 elif (env2 == 2): self.state = 29 output["loc"] = 19 output["stage"] = 1 elif (env2 == 5): self.state = 25 output["loc"] = 19 output["stage"] = 1 else: self._error(env2) elif self.state == 27: if (env2 == 5): self.state = 25 output["loc"] = 19 output["stage"] = 1 elif (env2 == 4): self.state = 26 output["loc"] = 18 output["stage"] = 1 elif (env2 == 3): self.state = 27 output["loc"] = 18 output["stage"] = 1 elif (env2 == 1): self.state = 28 output["loc"] = 19 output["stage"] = 1 elif (env2 == 2): self.state = 29 output["loc"] = 19 output["stage"] = 1 else: self._error(env2) elif self.state == 28: if (env2 == 0): self.state = 8 output["loc"] = 20 output["stage"] = 1 elif (env2 == 3): self.state = 21 output["loc"] = 20 output["stage"] = 1 elif (env2 == 1): self.state = 6 output["loc"] = 20 output["stage"] = 1 elif (env2 == 2): self.state = 7 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 29: if (env2 == 0): self.state = 8 output["loc"] = 20 output["stage"] = 1 elif (env2 == 4): self.state = 24 output["loc"] = 20 output["stage"] = 1 elif (env2 == 3): self.state = 21 output["loc"] = 
20 output["stage"] = 1 elif (env2 == 1): self.state = 6 output["loc"] = 20 output["stage"] = 1 elif (env2 == 2): self.state = 7 output["loc"] = 20 output["stage"] = 1 else: self._error(env2) elif self.state == 30: if (env2 == 0): self.state = 32 output["loc"] = 18 output["stage"] = 1 elif (env2 == 3): self.state = 27 output["loc"] = 18 output["stage"] = 1 elif (env2 == 1): self.state = 28 output["loc"] = 19 output["stage"] = 1 elif (env2 == 2): self.state = 29 output["loc"] = 19 output["stage"] = 1 else: self._error(env2) elif self.state == 31: if (env2 == 0): self.state = 32 output["loc"] = 18 output["stage"] = 1 elif (env2 == 4): self.state = 26 output["loc"] = 18 output["stage"] = 1 elif (env2 == 3): self.state = 27 output["loc"] = 18 output["stage"] = 1 elif (env2 == 1): self.state = 28 output["loc"] = 19 output["stage"] = 1 elif (env2 == 2): self.state = 29 output["loc"] = 19 output["stage"] = 1 else: self._error(env2) elif self.state == 32: if (env2 == 0): self.state = 32 output["loc"] = 18 output["stage"] = 1 elif (env2 == 1): self.state = 28 output["loc"] = 19 output["stage"] = 1 elif (env2 == 2): self.state = 29 output["loc"] = 19 output["stage"] = 1 else: self._error(env2) elif self.state == 33: if (env2 == 0): self.state = 32 output["loc"] = 18 output["stage"] = 1 elif (env2 == 1): self.state = 30 output["loc"] = 18 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 2): self.state = 31 output["loc"] = 18 output["stage"] = 1 else: self._error(env2) elif self.state == 34: if (env2 == 0): self.state = 32 output["loc"] = 18 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 1): self.state = 30 output["loc"] = 18 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 2): self.state = 31 output["loc"] = 18 output["stage"] = 1 else: self._error(env2) elif self.state == 35: if (env2 == 6): self.state = 40 output["loc"] = 16 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 7): self.state = 37 output["loc"] = 0 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 36: if (env2 == 6): self.state = 40 output["loc"] = 16 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 7): self.state = 37 output["loc"] = 0 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 37: if (env2 == 5): self.state = 35 output["loc"] = 8 output["stage"] = 1 elif (env2 == 6): self.state = 36 output["loc"] = 8 output["stage"] = 1 elif (env2 == 7): self.state = 37 output["loc"] = 0 output["stage"] = 1 else: self._error(env2) elif self.state == 38: if (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 2): self.state = 34 output["loc"] = 17 output["stage"] = 1 elif (env2 == 1): self.state = 33 output["loc"] = 17 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 39: if (env2 == 6): self.state = 40 output["loc"] = 16 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 7): 
self.state = 5 output["loc"] = 16 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 40: if (env2 == 6): self.state = 40 output["loc"] = 16 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 7): self.state = 5 output["loc"] = 16 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 41: if (env2 == 6): self.state = 40 output["loc"] = 16 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 2): self.state = 34 output["loc"] = 17 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 42: if (env2 == 6): self.state = 40 output["loc"] = 16 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 7): self.state = 37 output["loc"] = 0 output["stage"] = 1 elif (env2 == 3): self.state = 38 output["loc"] = 16 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 43: if (env2 == 6): self.state = 40 output["loc"] = 16 output["stage"] = 1 elif (env2 == 4): self.state = 41 output["loc"] = 16 output["stage"] = 1 elif (env2 == 7): self.state = 37 output["loc"] = 0 output["stage"] = 1 elif (env2 == 5): self.state = 39 output["loc"] = 16 output["stage"] = 1 else: self._error(env2) elif self.state == 44: if (env2 == 1): self.state = 48 output["loc"] = 0 output["stage"] = 0 elif (env2 == 0): self.state = 44 output["loc"] = 0 output["stage"] = 0 elif (env2 == 2): self.state = 46 output["loc"] = 0 output["stage"] = 0 else: self._error(env2) elif self.state == 45: if (env2 == 6): self.state = 43 output["loc"] = 8 output["stage"] = 0 elif (env2 == 5): self.state = 42 output["loc"] = 8 output["stage"] = 0 elif (env2 == 3): self.state = 3 output["loc"] = 8 output["stage"] = 0 elif (env2 == 4): self.state = 4 output["loc"] = 8 output["stage"] = 0 elif (env2 == 2): self.state = 46 output["loc"] = 0 output["stage"] = 0 else: self._error(env2) elif self.state == 46: if (env2 == 1): self.state = 48 output["loc"] = 0 output["stage"] = 0 elif (env2 == 0): self.state = 44 output["loc"] = 0 output["stage"] = 0 elif (env2 == 3): self.state = 3 output["loc"] = 8 output["stage"] = 0 elif (env2 == 4): self.state = 4 output["loc"] = 8 output["stage"] = 0 elif (env2 == 2): self.state = 46 output["loc"] = 0 output["stage"] = 0 else: self._error(env2) elif self.state == 47: if (env2 == 6): self.state = 43 output["loc"] = 8 output["stage"] = 0 elif (env2 == 5): self.state = 42 output["loc"] = 8 output["stage"] = 0 elif (env2 == 7): self.state = 51 output["loc"] = 0 output["stage"] = 0 elif (env2 == 4): self.state = 4 output["loc"] = 8 output["stage"] = 0 else: self._error(env2) elif self.state == 48: if (env2 == 1): self.state = 48 output["loc"] = 0 output["stage"] = 0 elif (env2 == 3): self.state = 3 output["loc"] = 8 output["stage"] = 0 elif (env2 == 0): self.state = 44 output["loc"] = 0 output["stage"] = 0 elif (env2 == 2): self.state = 46 output["loc"] = 0 output["stage"] = 0 else: self._error(env2) elif self.state == 49: if (env2 == 7): self.state = 51 output["loc"] = 0 output["stage"] = 0 elif (env2 == 
5): self.state = 42 output["loc"] = 8 output["stage"] = 0 elif (env2 == 3): self.state = 3 output["loc"] = 8 output["stage"] = 0 elif (env2 == 4): self.state = 4 output["loc"] = 8 output["stage"] = 0 elif (env2 == 6): self.state = 43 output["loc"] = 8 output["stage"] = 0 else: self._error(env2) elif self.state == 50: if (env2 == 1): self.state = 48 output["loc"] = 0 output["stage"] = 0 elif (env2 == 5): self.state = 42 output["loc"] = 8 output["stage"] = 0 elif (env2 == 3): self.state = 3 output["loc"] = 8 output["stage"] = 0 elif (env2 == 4): self.state = 4 output["loc"] = 8 output["stage"] = 0 elif (env2 == 2): self.state = 46 output["loc"] = 0 output["stage"] = 0 else: self._error(env2) elif self.state == 51: if (env2 == 6): self.state = 43 output["loc"] = 8 output["stage"] = 0 elif (env2 == 5): self.state = 42 output["loc"] = 8 output["stage"] = 0 elif (env2 == 7): self.state = 51 output["loc"] = 0 output["stage"] = 0 else: self._error(env2) elif self.state == 52: if (env2 == 0): self.state = 44 output["loc"] = 0 output["stage"] = 0 elif (env2 == 4): self.state = 45 output["loc"] = 0 output["stage"] = 0 elif (env2 == 2): self.state = 46 output["loc"] = 0 output["stage"] = 0 elif (env2 == 6): self.state = 47 output["loc"] = 0 output["stage"] = 0 elif (env2 == 1): self.state = 48 output["loc"] = 0 output["stage"] = 0 elif (env2 == 5): self.state = 49 output["loc"] = 0 output["stage"] = 0 elif (env2 == 3): self.state = 50 output["loc"] = 0 output["stage"] = 0 elif (env2 == 7): self.state = 51 output["loc"] = 0 output["stage"] = 0 else: self._error(env2) else: raise Exception("Unrecognized internal state: " + str(self.state)) return output
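The `move` method above is a machine-generated Mealy-style transition table: each call consumes one input symbol (`env2`), advances `self.state`, and returns an output dict with `"loc"` and `"stage"`, falling back to `self._error` for inputs not allowed in the current state. A minimal hand-written sketch of the same pattern, keeping only three transitions copied from the table above (everything else, including the error handler, is a simplified stand-in):

class TinyMealy:
    # (state, input) -> (next_state, output); the three entries are copied from
    # the generated table above, the rest are omitted for brevity.
    TABLE = {
        (0, 0): (0, {"loc": 16, "stage": 1}),
        (0, 1): (33, {"loc": 17, "stage": 1}),
        (33, 3): (38, {"loc": 16, "stage": 1}),
    }

    def __init__(self):
        self.state = 0

    def _error(self, env2):
        # Simplified stand-in for the original error handler.
        raise ValueError("Unrecognized input %r in state %r" % (env2, self.state))

    def move(self, env2):
        try:
            self.state, output = self.TABLE[(self.state, env2)]
        except KeyError:
            self._error(env2)
        return dict(output)  # return a copy so callers cannot mutate the table


# Usage: the calls below reproduce state 0's env2 == 0 and env2 == 1 branches,
# then state 33's env2 == 3 branch.
m = TinyMealy()
print(m.move(0))  # {'loc': 16, 'stage': 1}, state stays 0
print(m.move(1))  # {'loc': 17, 'stage': 1}, state becomes 33
print(m.move(3))  # {'loc': 16, 'stage': 1}, state becomes 38

Keeping the transitions in a dict keyed on (state, input) separates the data from the control flow, which is the main readability difference from the generated elif chain above.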
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transduce(self,inputs):\n self.start()\n return [self.step(inp) for inp in inputs]", "def api_make_move(self, move_input):\n return self.board.attempt_move(move_input)", "def move(self, *args, **kw):\n return self.execute_action('move', *args, **kw)", "def move(self, output_file, input_file, no_input=False):\n input = open(input_file, \"r\")\n for command in input:\n cleaned_cmd = command.strip()\n if cleaned_cmd in self.movements:\n movement, move_str = self.pick_movement(cleaned_cmd)\n if move_str == 'forward' or move_str == 'backward':\n movement(4)\n else:\n movement(2)\n # for debug purposes\n self.log_arr.append(move_str)\n drone_pose = self.record_pose()\n self.get_np_image(True, \"curr_image.png\")\n airsim.time.sleep(1)\n input.close()\n self.write_log(output_file)\n print(\"finished episode\")", "def outputs(self, inputs):\n return inputs", "def move():\n # step 1 of task analysis: get data\n data = get_data('MovementData/Walking_02.txt')\n # step 2: get the initial orientation of the sensor\n sensor_orientation = get_init_orientation_sensor(data.acc[0])\n # step 3: get the vector of the right horizontal semi-circular canal's on-direction\n rhscc_init_on_dir = get_init_on_dir_rh_scc(15)\n # preparation for step 4: align the angular velocity sensor data with the global coordinate system\n angular_velocities_aligned_globally = align_sensor_data_globally(data.omega, sensor_orientation)\n # step 4: calculate the stimulation of the cupula\n stimuli = get_scc_stimulation(angular_velocities_aligned_globally, rhscc_init_on_dir)\n # step 5: get the transfer function of the scc with the dynamics provided in the lecture\n scc_trans_fun = get_scc_transfer_fun(0.01, 5)\n # step 6: get the cupular deflection\n max_cupular_deflection = calculate_max_cupular_deflection(scc_trans_fun, stimuli, data.rate)\n # preparation for step 7: align the acceleration sensor data with the global coordinate system\n accelerations_aligned_globally = align_sensor_data_globally(data.acc, sensor_orientation)\n # step 8: calculate the maxmimum left- and rightwards stimulation of the otolithic organ\n max_left_right_stimuli = calculate_otolithic_max_stimuli(accelerations_aligned_globally, 1)\n # step 9: calculate the head orientation\n head_orientations = calculate_head_orientation(angular_velocities_aligned_globally, data.rate)\n\n return max_cupular_deflection, max_left_right_stimuli, head_orientations", "def get_move(moves):\n pass", "def forward_pass(self, inputs):\n self._rbf_forward(inputs)\n self._slp_forward()\n return self.slp_outputs", "def exercise_4(inputs): # DO NOT CHANGE THIS LINE\n output = inputs\n\n return output # DO NOT CHANGE THIS LINE", "def forward(self, inputs):\n _, state = self.core(inputs)\n return state", "def move(self, source, input):\n # raise NotImplementedError\n destination = set()\n\n nodes = {source} if not isinstance(source, set) else source\n nodes.update(self.get_epsilon_closures(nodes))\n inputs = {input} if not isinstance(input, set) else input\n\n for node in nodes:\n for edge in self.graph.edges_iter(node, True):\n from_node, to_node, weight = edge\n weight = weight['weight']\n\n # TODO: the following needs better (scientific) implementation\n try:\n weight = weight if weight == FA.epsilon else eval(weight)\n except TypeError:\n weight = chr(weight)\n except (NameError, SyntaxError):\n pass\n\n for input in inputs:\n if str(weight) == str(input):\n destination.add(to_node)\n\n return destination", "def next_output(self, inputs: Union[int, List[int], None] = None):\n 
if inputs is None:\n inputs = []\n if type(inputs) == int:\n inputs = [inputs]\n for input_ in inputs:\n self._inputs.append(input_)\n\n while not self.finished:\n self.index, output = self._step(self.index)\n if output is not None:\n return output", "def forward(self, inputs):\n raise NotImplementedError", "def get_outputs(self, inputs):\n \n # Paths\n input_path = self.input_path\n output_path = self.output_path\n\n # Filename changes\n output_extension = stringify(self.executor.output_extension)\n output_prefix = stringify(self.executor.output_prefix) or ''\n\n if self.output:\n # Combine all inputs into one output\n output = output_prefix + change_extension(self.output, output_extension)\n output = join_path(output_path, output)\n \n if self.output_transform:\n output = self.output_transform(output)\n \n if self.run_output:\n if self.run_command:\n verify_type(self.run_command, list)\n run_command = [stringify(v).format(output=output) for v in self.run_command]\n else:\n run_command = [output]\n with current_context() as ctx:\n ctx.current.project.run[self.run_output] = run_command\n \n return True, [Output(output_path, output)]\n elif inputs:\n # Each input matches an output\n \n # Strip prefix\n if self.output_strip_prefix_from:\n with current_context() as ctx:\n _, p = ctx.current.project.get_phase_for(self.output_strip_prefix_from,\n 'output_strip_prefix_from')\n if p:\n output_strip_prefix = p.output_path\n else:\n output_strip_prefix = None\n else:\n output_strip_prefix = stringify(self.output_strip_prefix)\n if output_strip_prefix is None:\n output_strip_prefix = input_path\n if not output_strip_prefix.endswith(os.sep):\n output_strip_prefix += os.sep\n output_strip_prefix_length = len(output_strip_prefix)\n \n outputs = [] \n for the_input in inputs:\n output = the_input\n \n # Strip prefix\n if output.startswith(output_strip_prefix):\n output = output[output_strip_prefix_length:]\n\n # Filename changes\n if output_prefix:\n p, f = os.path.split(output)\n output = join_path(p, output_prefix + f)\n output = change_extension(output, output_extension)\n \n output = join_path(output_path, output)\n\n if self.output_transform:\n output = self.output_transform(output)\n\n outputs.append(Output(output_path, output))\n \n return False, outputs\n else:\n return False, []", "def generate_simulated_movements(Ns, dT, Ts, ts, move_type):\n moves = []\n for ni, n in enumerate(Ns):\n _temp = []\n for dt in dT:\n sys.stdout.write('\\rNs: {0}, dT: {1}'.format(n, dt))\n t, m, _ = gen_move(n, [1. 
/ n] * n, [dt] * (n - 1), [Ts] * n,\n ts=ts, move_type=move_type)\n _temp.append(m)\n moves.append(_temp)\n return moves", "def read_move(self, steps):\n res = []\n size = len(steps[0])\n side_size = int(math.sqrt(size))\n for i in range(0, len(steps) - 1):\n state = steps[i]\n next_state = steps[i + 1]\n next_pos = next_state.index(0)\n pos = state.index(0)\n rel = next_pos - pos\n direction = 'up'\n if rel == 1:\n direction = 'right'\n if rel == -1:\n direction = 'left'\n if rel == side_size:\n direction = 'down'\n res.append(direction)\n return res", "def getMove(self):\n while True:\n try:\n init = tuple(int(str.strip()) for str in raw_input('Choose the initial position of your move: ').split(','))\n break\n except ValueError:\n print(\"Input is not integer.\")\n\n while (len(init) != 2) or (init[0] not in range(1, self.grid.width+1)) or (init[1] not in range(1, self.grid.height+1)):\n print 'Initial position is not valid.'\n init = tuple(int(str.strip()) for str in raw_input('Choose the initial position of your move: ').split(','))\n\n while True:\n try:\n dest = tuple(int(str.strip()) for str in raw_input('Choose the destination position of your move: ').split(','))\n break\n except ValueError:\n print(\"Input is not integer.\")\n\n while (len(dest) != 2) or (dest[0] not in range(1, self.grid.width+1)) or (dest[1] not in range(1, self.grid.height+1)):\n print 'Destination position is not valid.'\n dest = tuple(int(str.strip()) for str in raw_input('Choose the destination position of your move: ').split(','))\n\n return (init, dest)", "def move(x,y):\r\n pass", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def forward(self, *inputs):\n raise NotImplementedError", "def move_wrapper(board, player, trials):\r\n move = mm_move(board, player)\r\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\r\n return move[1]", "def move_wrapper(board, player, trials):\r\n move = mm_move(board, player)\r\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\r\n 
return move[1]", "def move_wrapper(board, player, trials):\n move = mm_move(board, player)\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\n return move[1]", "def move_wrapper(board, player, trials):\n move = mm_move(board, player)\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\n return move[1]", "def process_inputs(self, inputs):", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_system_sampler.move(std_gcmc_system_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_system_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_system_sampler.n_accepted <= n_moves\n assert len(std_gcmc_system_sampler.Ns) == n_moves\n assert len(std_gcmc_system_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_system_sampler.energy, Quantity)\n assert std_gcmc_system_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def process(self, inputs):\n output = None\n return output", "def move_and_process_input(batch):\n x, y = batch\n x = x.to(device).float()\n y = torch.as_tensor(y).to(device)\n x = x.permute(0, -1, 1, 2, 3)\n return x, y", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_sphere_sampler.move(std_gcmc_sphere_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_sphere_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_sphere_sampler.n_accepted <= n_moves\n assert len(std_gcmc_sphere_sampler.Ns) == n_moves\n assert len(std_gcmc_sphere_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_sphere_sampler.energy, Quantity)\n assert std_gcmc_sphere_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def step(self, inputs=None, targets=None):\n if not self.training:\n self.train_mode()\n\n outputs, loss = self.forward(\n inputs=inputs,\n targets=targets\n )\n\n self.update(\n loss=loss,\n inputs=inputs,\n targets=targets,\n outputs=outputs\n )\n\n return outputs, loss", "def run(self,\n inputs: Sequence[np.ndarray],\n verbose: bool=False) -> Tuple[Sequence[np.ndarray], Sequence[np.ndarray]]:\n\n inputs_concat = [inp[t,:] for inp in inputs for t in range(inp.shape[0])]\n\n steps = np.sum([i.shape[0] for i in inputs])\n if verbose:\n print(f\"Running on {len(inputs)} inputs ({steps} steps)\")\n\n ## Autochecks of inputs\n self._autocheck_io(inputs=inputs_concat)\n\n all_outputs = []\n all_states = []\n for i in range(len(inputs)):\n internal_pred = []; output_pred = []\n for t in range(inputs[i].shape[0]):\n output, state = self.compute_output(inputs_concat[i+t])\n internal_pred.append(state)\n output_pred.append(output)\n all_states.append(np.asarray(internal_pred))\n all_outputs.append(np.asarray(output_pred))\n\n # return all_outputs, all_int_states\n return all_outputs, all_states", "def DoMove(position, move):\n return position - move", "def scan_move(self, *args, **kwargs):\n return self(AbilityId.SCAN_MOVE, *args, **kwargs)", "def out(self, inputs):", "def install_inputs():\n dest = os.path.join(safe_dir, \"input\")\n sys.stdout.write(\"Moving directory %r to %r...\\n\" % (\"input\", dest))\n try:\n shutil.move(\"input\", dest)\n except (OSError, shutil.Error), exc:\n 
sys.sdterr.write(\"Failed to move %r to %r\\n\" % (\"input\", dest))\n sys.sdterr.write(\" %s\\n\" % exc)\n return 1\n undo_actions.append(restore_inputs)\n\n source = os.path.join(ref_test_data.test_data_dir, \"input\")\n sys.stdout.write(\"Copying directory %r to %r...\\n\" % (source, \"input\"))\n try:\n shutil.copytree(source, \"input\")\n except (OSError, shutil.Error), exc:\n sys.sdterr.write(\"Failed to move %r to %r\\n\" % (source, \"input\"))\n sys.sdterr.write(\" %s\\n\" % exc)\n return 1\n undo_actions.append(remove_test_input)\n\n return 0", "def forward(self, inputs):\n if isinstance(inputs, (list, tuple)):\n return (self.layers(inputs[0]), *inputs[1:])\n else:\n return self.layers(inputs)", "def Move(args):\n\n parser = argparse.ArgumentParser(usage='mv [Options] sources... dest',\n description=Move.__doc__)\n parser.add_argument(\n '-v', '--verbose', dest='verbose', action='store_true',\n default=False,\n help='verbose output.')\n parser.add_argument(\n '-f', '--force', dest='force', action='store_true',\n default=False,\n help='force, do not error it files already exist.')\n parser.add_argument('srcs', nargs='+')\n parser.add_argument('dest')\n\n options = parser.parse_args(args)\n\n if options.verbose:\n print('mv %s %s' % (' '.join(options.srcs), options.dest))\n\n for src in options.srcs:\n MovePath(options, src, options.dest)\n return 0", "def move(cmd:str, positive_pin:int=21, negative_pin:int=20):\n if (cmd is \"CCW\"):\n print(\"Moving counter clockwise\")\n PIN1 = pin1\n PIN2 = pin2\n else:\n PIN1 = pin2\n PIN2 = pin1\n print(\"Moving clockwise\")\n GPIO.output(PIN1, 0)\n GPIO.output(PIN2, 1)", "def _get_move_actions(self, exclude=None):\n rtn = []\n\n # Check for moving up\n can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER}\n if can_move_up:\n if self._surrounded():\n rtn.append({\n 'func': '_move',\n 'args': (self._pos + _Vec3(0, 1, 0),)\n })\n else:\n rtn.append({\n 'func': '_move_up',\n 'args': (exclude,)\n })\n\n # Check for moving down\n hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))\n if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:\n rtn.append({'func': '_move_down'})\n\n # Check for side moves \n for dir_ in _adj_dirs():\n rtn.extend(self._side_moves(dir_, can_move_up))\n\n return rtn", "def computer_move(board,move,player):\r\n com_execution(board, move, player)", "def forward(self, inputs, prev_state):\n output = []\n state = {\n 'controller_state': prev_state['controller_state'],\n 'memory_state': prev_state['memory_state']\n }\n steps = inputs.shape[1]\n batch_size = inputs.shape[0]\n batch_history_read = torch.zeros((batch_size, steps, self.memory.num_read_heads, self.memory.num_rows))\n batch_history_write = torch.zeros((batch_size, steps, self.memory.num_write_heads, self.memory.num_rows))\n\n for i in range(steps):\n controller_state = self.controller(inputs[:, i, :], state['controller_state'])\n\n controller_output = controller_state[0]\n\n read_vector, memory_state = self.memory(self.layer_norm(self._clip_if_enabled(controller_output)), state['memory_state'])\n state = {\n 'controller_state': controller_state,\n 'memory_state': memory_state\n }\n\n for batch in range(batch_size):\n batch_history_read[batch][i] = memory_state['read_weights'][batch]\n batch_history_write[batch][i] = memory_state['write_weights'][batch]\n\n dropped_controller_output = self.dropout(controller_output)\n read_vector = torch.flatten(read_vector, start_dim=1)\n input_final_layer = torch.cat((dropped_controller_output, 
read_vector), 1)\n final_output = self.linear(input_final_layer)\n output.append(final_output)\n \n # we are interested only on the last output of the sequence\n out = output[-1]\n return out, state, batch_history_read, batch_history_write", "def forward(self, *inputs: Union[List[torch.Tensor], torch.Tensor]) -> Union[Tuple[torch.Tensor, ...], torch.Tensor]:\n processed_inputs: List[torch.Tensor] = preprocess_cell_inputs(self.num_predecessors, *inputs)\n states: List[torch.Tensor] = self.preprocessor(processed_inputs)\n for ops, inps in zip(\n cast(Sequence[Sequence[LayerChoice]], self.ops),\n cast(Sequence[Sequence[InputChoice]], self.inputs)\n ):\n current_state = []\n for op, inp in zip(ops, inps):\n current_state.append(op(inp(states)))\n current_state = torch.sum(torch.stack(current_state), 0)\n states.append(current_state)\n this_cell = torch.cat([states[k] for k in self.output_node_indices], self.concat_dim)\n return self.postprocessor(this_cell, processed_inputs)", "def get_moves(self):", "def next_inputs(self, time, outputs, state, sample_ids, name=None):\n del sample_ids # unused by next_inputs\n\n # Check if decoding is finished.\n finished = self.__is_decoding_finished(next_time=time + 1,\n outputs=outputs)\n\n # Use the last steps outputs as the next steps inputs.\n # When using the Tacotron reduction factor r the RNN produces an output of size\n # r * `input_size`. But it only takes input of size `input_size`.\n # We will therefore only pass every r'th frame to the next decoding step.\n next_inputs = outputs[:, -self._input_size:]\n\n # Use the resulting state from the last step as the next state.\n next_state = state\n\n return finished, next_inputs, next_state", "def forward(self, inputs):\n # unpack\n x, lengths = pad_packed_sequence(inputs, batch_first=True)\n # x is (batch, slen)\n x = embedded_dropout(self.embed, x,\n dropout=self.dropout_e if self.training else 0)\n # x is (batch, seq, dim_e)\n # but we need x to be (slen, batch, dim_e)\n x = x.transpose(0, 1)\n\n # rnn\n x = pack_padded_sequence(x, lengths, batch_first=False)\n x, (_, _) = self.lstm(x)\n # x is (slen, batch, num_directions * hidden_size)\n x, lengths = pad_packed_sequence(x, batch_first=False)\n rnn_out = x\n # turn into batch first (batch, slen, num_dirs * hiddden)\n # and then (batch, num_dirs * hidden, slen)\n x = x.transpose(0, 1).transpose(1, 2)\n x = F.relu(x)\n x = F.max_pool1d(x, x.size(2)) # (batch, num_dirs * hidden, 1)\n x = x.squeeze(2)\n\n # do some dropout\n x = self.dropout(x)\n # map to classes\n x = self.output(x)\n return x, rnn_out", "def command_moves(board, locations):\n possible_moves = []\n buffers = [(1,0), (0,1), (-1,0), (0,-1)]\n\n for piece in locations:\n piece_moves = []\n\n for move in buffers:\n\n poss_move = return_valid_move(board, locations, piece, move)\n\n if poss_move:\n piece_moves.append(poss_move)\n\n possible_moves.append(piece_moves)\n\n return possible_moves", "def shift(self, inputs):\n try:\n if type(inputs).__name__ != 'list' \\\n or type(inputs[0]).__name__ != 'str' \\\n or type(inputs[1]).__name__ != 'list' \\\n or type(inputs[2]).__name__ != 'str':\n return None\n except IndexError:\n return None\n if inputs[2].lower() == 'safe': # safe word entered\n self._lock.acquire()\n try:\n self._aggro = aggroMgr._DEFAULT\n finally:\n self._lock.release()\n elif inputs[2].lower() == 'enrage': # insta anger word\n self._lock.acquire()\n try:\n self._aggro = aggroMgr.ENRAGED\n finally:\n self._lock.release()\n else: # normal shift\n beliefs = self._beliefs\n 
aggroWords = self._aggroWords\n delta = 0\n if self.query() == 0:\n # if bot is \"happy\", believe anything that's not a question\n # and has hamster in it. don't bother with beliefs if happy\n from beliefSys import separate as sep\n if '?' not in inputs[0] \\\n and 'hamster' in sep(inputs[0], True):\n beliefs.add(inputs[0])\n else:\n delta = beliefs.query(inputs[0])\n delta += self._shiftAggroWds(inputs[1])\n from time import time as time\n self._lock.acquire() # wait until run() finishes writing\n try:\n aggro = self._aggro\n if aggro+delta <= aggroMgr.AGGRO_MIN_VALUE:\n self._aggro = aggroMgr.AGGRO_MIN_VALUE\n elif aggro+delta >= aggroMgr.AGGRO_MAX_VALUE:\n self._aggro = aggroMgr.AGGRO_MAX_VALUE\n else:\n self._aggro += delta\n # set time to reflect that input was just received\n # so that it doesn't cooloff\n self._last_input = time()\n finally:\n self._lock.release()", "def forward(self, inputs, memories, mask):\n memory = self.get_go_frame(inputs).unsqueeze(0)\n memories = self._reshape_memory(memories)\n memories = torch.cat((memory, memories), dim=0)\n memories = self._update_memory(memories)\n memories = self.prenet(memories)\n\n self._init_states(inputs, mask=mask)\n self.attention.init_states(inputs)\n\n outputs, stop_tokens, alignments = [], [], []\n while len(outputs) < memories.size(0) - 1:\n memory = memories[len(outputs)]\n decoder_output, attention_weights, stop_token = self.decode(memory)\n outputs += [decoder_output.squeeze(1)]\n stop_tokens += [stop_token.squeeze(1)]\n alignments += [attention_weights]\n\n outputs, stop_tokens, alignments = self._parse_outputs(\n outputs, stop_tokens, alignments)\n return outputs, alignments, stop_tokens", "def forward(self, inputs):\n batch_size = len(inputs.batch_sizes)\n # unpack\n x, lengths = pad_packed_sequence(inputs, batch_first=True)\n # x is (batch, slen)\n batch_size = x.size(0)\n x = self.embed(x) # x is (batch, seq, dim_e)\n # but we need x to be (slen, batch, dim_e)\n x = x.transpose(0, 1)\n\n # rnn\n x = pack_padded_sequence(x, lengths, batch_first=False)\n _, (x, _) = self.lstm(x) # we want x=$h_(t=slen)$ (last hidden output)\n # we have x as (num_directions=2, batch, hidden)\n # we transpose it to (batch, num_direction, hidden)\n # than we view it as (batch, 1, hidden * 2) and finally we\n # squeeze it to (batch, hidden * 2) because we wanted to have\n # the directions concatenated as a single feature vector (per input)\n x = x.transpose(0, 1).contiguous().view(batch_size, 1, -1).squeeze(1)\n # we need to have squeeze(dim=1) because of the batch_size=1 case\n\n # do some dropout\n x = self.dropout(x)\n # map to classes\n x = self.output(x)\n return x", "def get_input(inputs):\n return input(inputs)", "def get_next_move(self):\n return int(input('Enter your move: '))", "def move(source, dest, speed=0):\n norm = normalise(source, dest)\n new_pos = (source[0] + norm[0], source[1] + norm[1])\n return new_pos", "def get_move_from_user(self):\n user_input = input(\"Move: \")\n if user_input == 'undo':\n return user_input\n try:\n move_list = user_input.split(\" \")\n move_list[1] = int(move_list[1])\n except:\n move_list = ['XX', 0, 'XX']\n return move_list", "def getMove(self, grid):\n\n cells = grid.getAvailableCells()\n\n while True:\n moveInput = input(\"Enter your move: \")\n \n if re.match(r\"place \\d,\\d\", moveInput) or re.match(r\"erase \\d,\\d\", moveInput):\n move = moveInput.split()\n action = move[0]\n pos = move[1].split(',')\n\n if (action == \"place\" and (int(pos[0]), int(pos[1])) in cells) or (action == 
\"erase\" and grid.getCellValue((int(pos[0]), int(pos[1]))) != 'T'):\n return [move[0], (int(pos[0]), int(pos[1]))]\n \n elif moveInput == \"restart\":\n return -1\n \n elif moveInput == \"show solution\":\n return 0\n \n print(\"Move not valid\")", "def possible_moves(state):\n sliders = state['sliders']\n pins = state['pins']\n result = []\n\n # this is a bit repetitive -- could theoretically generalize?\n for i, pin in enumerate(pins):\n x, y = pin\n for dy, direction in [(1, 'down'), (-1, 'up')]:\n new_pin = (x, y+dy)\n move = 'move pin {0} {1}'.format(i, direction)\n if all_open([new_pin], sliders):\n new_state = deepcopy(state)\n new_state['pins'][i] = new_pin\n result.append((move, new_state))\n\n for i, slider in enumerate(sliders):\n coords, offset = slider\n for dx, direction in [(1, 'left'), (-1, 'right')]:\n new_slider = (coords, offset+dx)\n move = 'move slider {0} {1}'.format(i, direction)\n if all_open(pins, [new_slider]):\n new_state = deepcopy(state)\n new_state['sliders'][i] = new_slider\n result.append((move, new_state))\n\n return result", "def apply_moves(programs, moves):\n for move in moves:\n if 's' in move:\n # Spin\n num = int(move[1:])\n programs = programs[-num:] + programs[:len(programs) - num]\n else:\n if 'x' in move:\n # Exchange\n i, j = [int(x) for x in move[1:].split('/')]\n else:\n # Partner\n i, j = [programs.index(x) for x in move[1:].split('/')]\n programs[i], programs[j] = programs[j], programs[i]\n return programs", "def apply(self, inputs):\n raise NotImplementedError()", "def _parse_move(origin, destination, axis):\n # If only one set of coordinates is defined, make sure it's used to move things\n if destination is None:\n destination = origin\n origin = [0, 0]\n\n d = _parse_coordinate(destination)\n o = _parse_coordinate(origin)\n if axis == \"x\":\n d = (d[0], o[1])\n if axis == \"y\":\n d = (o[0], d[1])\n dx, dy = np.array(d) - o\n\n return dx, dy", "def transduce(self, inpSequence=list()):\n\n lstOutputs = list()\n\n # Initialize the state machine\n self.initialize()\n\n # Check if inpSequence is non-empty\n if len(inpSequence) == 0:\n raise ValueError(\"Empty Input Sequence\")\n\n # Loop and apply the inputs\n for i in inpSequence:\n lstOutputs.append(self.step(i))\n # try:\n # lstOutputs.append(self.step(i))\n # except Exception, ex:\n # lstOutputs.append(None)\n # print 'Step function failed, for input: ', i, ' ', ex.message\n\n return lstOutputs", "async def move(self, board, valid_actions):\n self._move = None\n output_move_row = Value('d', -1)\n output_move_column = Value('d', 0)\n try:\n # await self.search(board, valid_actions) \n p = Process(\n target=self.search, \n args=(\n self._color, board, valid_actions, \n output_move_row, output_move_column))\n p.start()\n while p.is_alive():\n await asyncio.sleep(0.1)\n self._move = np.array([output_move_row.value,output_move_column.value],dtype=np.int32)\n except asyncio.CancelledError as e:\n print('The previous player is interrupted by a user or a timer.')\n except Exception as e:\n print(type(e).__name__)\n print('move() Traceback (most recent call last): ')\n traceback.print_tb(e.__traceback__)\n finally:\n p.kill()\n self._move = np.array(\n [output_move_row.value, output_move_column.value],\n dtype=np.int32)\n return self.best_move", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward(self, inputs, 
outputs):\n if len(inputs) > 1:\n np.copyto(outputs[0], np.sum(inputs, 0))\n else:\n np.copyto(outputs[0], inputs[0])", "def map_inputs(self, inputs=[],undo=False):\n if undo:\n return self._map_inputs_undo(inputs)", "def forward(self, input1, input2):\n output1 = self.forward_once(input1)\n output2 = self.forward_once(input2)\n return output1, output2", "def call(self, inputs, **kwargs): # pylint: disable=unused-argument\n return inputs", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def interactive_strategy(game: Any) -> Any:\r\n move = input(\"Enter a move: \")\r\n return game.str_to_move(move)", "def move_dirs(args):\n src = args[0]\n dst = args[1]\n print(\"Moving from: {}\".format(src))\n print(\" to: {}\".format(dst))\n shutil.move(src, dst)\n return", "def interactive_strategy(game: Any) -> Any:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)", "def interactive_strategy(game: Any) -> Any:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)", "def interactive_strategy(game: Any) -> Any:\n move = input(\"Enter a move: \")\n return game.str_to_move(move)", "def toMoves(self, results):\n moves = ArrayList()\n for result in results:\n moves.add(Move(result.get(1)))\n return moves", "def feedforward(self, inputs):\n # hidden activations\n # a_hidden = self.transfer(np.dot(self.w_input, inputs))\n a_hidden1 = self.transfer(np.dot(inputs, self.w_input))\n \n dots1 = (np.dot(a_hidden1, self.w_middle))\n a_hidden2 = self.transfer(np.asarray(dots1))\n \n #a_output = self.transfer(np.dot(self.w_output, a_hidden))\n dots2 = (np.dot(a_hidden2, self.w_output))\n a_output = self.transfer(np.asarray(dots2))\n \n return (a_hidden1, a_hidden2, a_output)", "def forward(self, inputs: Tensor) -> Tensor:\n return self.sequential(inputs).transpose(1, 2)", "def forward(self, inputs):\n # unpack\n x, lengths = pad_packed_sequence(inputs, batch_first=True)\n # x is (batch, slen)\n x = self.embed(x) # x is (batch, seq, dim_e)\n # but we need x to be (slen, batch, dim_e)\n x = x.transpose(0, 1)\n\n # rnn\n x = pack_padded_sequence(x, lengths, batch_first=False)\n x, (_, _) = self.lstm(x)\n # x is (slen, batch, num_directions * hidden_size)\n x, lengths = pad_packed_sequence(x, batch_first=False)\n # turn into batch first (batch, slen, num_dirs * hiddden)\n # and then (batch, num_dirs * hidden, slen)\n x = x.transpose(0, 1).transpose(1, 2)\n x = F.relu(x)\n x = F.max_pool1d(x, x.size(2)) # (batch, num_dirs * hidden, 1)\n x = x.squeeze(2)\n\n # do some dropout\n x = self.dropout(x)\n # map to classes\n x = self.output(x)\n return x", "def test_move(self):\n neq_gcmc_system_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_system_sampler.move(neq_gcmc_system_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_system_sampler.n_moves == 1\n assert 0 <= neq_gcmc_system_sampler.n_accepted <= 1\n assert len(neq_gcmc_system_sampler.Ns) == 1\n assert len(neq_gcmc_system_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_system_sampler.velocities, Quantity)\n assert 
neq_gcmc_system_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_system_sampler.insert_works) + len(neq_gcmc_system_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_system_sampler.n_explosions <= 1\n\n return None", "def main():\n moveList = ('R4, R3, L3, L2, L1, R1, L1, R2, R3, L5, L5, R4, L4, R2, R4, '\n 'L3, R3, L3, R3, R4, R2, L1, R2, L3, L2, L1, R3, R5, L1, L4, '\n 'R2, L4, R3, R1, R2, L5, R2, L189, R5, L5, R52, R3, L1, R4, '\n 'R5, R1, R4, L1, L3, R2, L2, L3, R4, R3, L2, L5, R4, R5, L2, '\n 'R2, L1, L3, R3, L4, R4, R5, L1, L1, R3, L5, L2, R76, R2, R2, '\n 'L1, L3, R189, L3, L4, L1, L3, R5, R4, L1, R1, L1, L1, R2, '\n 'L4, R2, L5, L5, L5, R2, L4, L5, R4, R4, R5, L5, R3, L1, L3, '\n 'L1, L1, L3, L4, R5, L3, R5, R3, R3, L5, L5, R3, R4, L3, R3, '\n 'R1, R3, R2, R2, L1, R1, L3, L3, L3, L1, R2, L1, R4, R4, L1, '\n 'L1, R3, R3, R4, R1, L5, L2, R2, R3, R2, L3, R4, L5, R1, R4, '\n 'R5, R4, L4, R1, L3, R1, R3, L2, L3, R1, L2, R3, L3, L1, L3, '\n 'R4, L4, L5, R3, R5, R4, R1, L2, R3, R5, L5, L4, L1, L1')\n moveList = moveList.replace(' ', '').split(',')\n\n elf = Path()\n\n for move in moveList:\n start = [elf.x, elf.y]\n print('Elf turning {} and walking for {} steps.').format(\n move[0], move[1:])\n elf.move(move[0], move[1:])\n end = [elf.x, elf.y]\n if(addMoveToList(elf, start, end)):\n break\n print('Elf ended in position {},{}').format(elf.x, elf.y)\n print('Shortest distance from origin to EB HQ is: {}').format(\n abs(elf.x) + abs(elf.y))", "def test_move():\n human = Human()\n coordinates = [2, 1]\n dimensions = [3, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n\n possible_new_coordinates = [[2, 0], [3, 0], [3, 1], [3, 2], [2, 2], [1, 2], [1, 1], [1, 0]]\n\n assert new_coordinates in possible_new_coordinates", "def calc(self, inputs):\n return [neuron.output(inputs) for neuron in self._neurons]", "def movement_processor(stage, player,\n stage_tiles,command,\n special,items):\n player_x = player[0]\n player_y = player[1]\n movement = command[1].strip().lower()\n\n # Change player's co-ordinates appropriately\n if movement == \"down\" or movement == \"south\" or movement == \"d\":\n player_y = player_y - 1\n elif movement == \"up\" or movement == \"north\" or movement == \"u\":\n player_y = player_y + 1\n elif movement == \"right\" or movement == \"east\" or movement == \"r\":\n player_x = player_x + 1\n elif movement == \"left\" or movement == \"west\" or movement == \"l\":\n player_x = player_x - 1\n\n # Movement validation\n player_new = [player_x, player_y]\n\n # Special Checker\n special, items, valid = special_condition_checker(special, items, player_new)\n if valid:\n pass \n else:\n player_new = player\n player_new.append(True) \n return player_new, special, items\n\n # Boundary and Collision checking\n valid = boundary_checker(stage, player_new)\n if valid:\n valid = tile_checker(stage_tiles, player_new) \n # Reset the program\n if not valid:\n player_new = player\n player_new.append(False)\n else:\n player_new.append(True)\n \n\n return player_new, special, items", "def step(self, move):", "def actions_turn(positions, player1, player2, creatures):\n\n attacking = []\n moving = []\n\n # Split the orders within attacks and movements\n for character in positions:\n if character in creatures:\n if positions[character]['order'] == 'attack':\n attacking += character\n else:\n moving += character\n elif character in player1:\n if positions[character]['order'] == 'attack':\n attacking += character\n else:\n moving += character\n\n 
else:\n if positions[character]['order'] == 'attack':\n attacking += character\n else:\n moving += character\n\n # Execute the attacks\n for character in attacking:\n # First attacking : the creatures\n if character in creatures:\n hero_name = character\n name_attack = positions[character]['name_attack']\n attack_coord = positions[character]['where']\n attack(positions, hero_name, name_attack, (0, 0), attack_coord, player1, player2, creatures)\n # Then the heroes of the first player\n elif character in player1:\n hero_name = character\n name_attack = positions[character]['name_attack']\n attack_coord = positions[character]['where']\n attack(positions, hero_name, name_attack, (0, 0), attack_coord, player1, player2, creatures)\n # Finally the ones of the second player\n else:\n hero_name = character\n name_attack = positions[character]['name_attack']\n attack_coord = positions[character]['where']\n attack(positions, hero_name, name_attack, (0, 0), attack_coord, player1, player2, creatures)\n\n for character in moving:\n print(character, moving)\n # First moving : the creatures\n if character in creatures:\n hero_name = character\n movement_coord = positions[character]['where']\n move(hero_name, positions, movement_coord)\n\n # Then the heroes of the first player\n elif character in player1:\n hero_name = character\n movement_coord = positions[character]['where']\n move(hero_name, positions, movement_coord)\n\n # Finally the ones of the second player\n else:\n hero_name = character\n movement_coord = positions[character]['where']\n move(hero_name, positions, movement_coord)\n\n return player1, player2, positions, creatures", "def call(self, inputs, *args, **kwargs):\n\n outputs = self.mlp(inputs, *args, **kwargs)\n return outputs", "def update(self, inputs): # pragma: no cover\n return inputs", "def get_move() -> str:\n msg = 'Enter a move for that section (C to check, S to swap, R to rotate): '\n move = input(msg)\n while not wf.is_valid_move(move):\n print('Invalid move!')\n move = input(msg) \n return move", "def make_move(self):\n\n # If the agent is starting a game, make an \n # initial move\n if self.get_play_status() == False: \n self.initial_move()\n return\n\n # for speeds sake, allow the reflex agent to respond to manual\n # input. 
comment out for automatic running.\n x = int(input('hotwire x:'))\n y = int(input('hotwire y:'))\n return self.get_game_space().set_tile(x,y,self.get_affinity())\n\n # Check wheather the the agent side is going to \n # win by making one move, make the move\n # OR\n # Check if the oponent has a compromising move \n best_move = self.victory_check()\n if best_move is None: best_move = self.counter_opponent_win()\n if best_move is None: best_move = self.counter_opponent_adv()\n if best_move is None: best_move = self.best_last_option()\n if best_move != None: \n x = best_move[0]\n y = best_move[1]\n return self.get_game_space().set_tile(x,y,self.get_affinity())", "def sim_move(self, state, move):\n out = ''\n for val in self.moves[move]:\n out += state[val]\n return out", "def test_move(self):\n neq_gcmc_sphere_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_sphere_sampler.move(neq_gcmc_sphere_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_sphere_sampler.n_moves == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_accepted <= 1\n assert len(neq_gcmc_sphere_sampler.Ns) == 1\n assert len(neq_gcmc_sphere_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_sphere_sampler.velocities, Quantity)\n assert neq_gcmc_sphere_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_sphere_sampler.insert_works) + len(neq_gcmc_sphere_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_left_sphere <= 1\n assert 0 <= neq_gcmc_sphere_sampler.n_explosions <= 1\n\n return None", "def test_get_move_interface(self):\n h, w = 9, 9 # board size\n test_depth = 1\n starting_location = (2, 7)\n adversary_location = (0, 0) # top left corner\n iterative_search = False\n search_method = \"minimax\"\n heuristic = lambda g, p: 0. # return 0 everywhere\n\n # create a player agent & a game board\n agentUT = game_agent.CustomPlayer(\n test_depth, heuristic, iterative_search, search_method)\n\n # Test that get_move returns a legal choice on an empty game board\n board = isolation.Board(agentUT, 'null_agent', w, h)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed as player 1 on an \" +\n \"empty board. It should return coordinates on the \" +\n \"game board for the location of the agent's next \" +\n \"move. The move must be one of the legal moves on \" +\n \"the current game board.\"))\n\n # Test that get_move returns a legal choice for first move as player 2\n board = isolation.Board('null_agent', agentUT, w, h)\n board.apply_move(starting_location)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed making the first \" +\n \"move as player 2 on a new board. It should return \" +\n \"coordinates on the game board for the location \" +\n \"of the agent's next move. 
The move must be one \" +\n \"of the legal moves on the current game board.\"))\n\n # Test that get_move returns a legal choice after first move\n board = isolation.Board(agentUT, 'null_agent', w, h)\n board.apply_move(starting_location)\n board.apply_move(adversary_location)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed as player 1 on a \" +\n \"game in progress. It should return coordinates on\" +\n \"the game board for the location of the agent's \" +\n \"next move. The move must be one of the legal moves \" +\n \"on the current game board.\"))", "def make_move(self, playername, coordinates, direction):\n\n pass", "def drive(self, goalstates, inputs):\n # extract start and goal states from environment\n path_list = []\n destination_reached = []\n action_order = []\n path_lengths = []\n start = self.state\n\n # for all goal states do\n # goalReached,path= A Star(start, goal)\n for goal in goalstates:\n goalReached, path = self.AStar(start[\"location\"], goal, inputs)\n destination_reached.append(goalReached)\n path_list.append(path)\n path_lengths.append(len(path))\n\n # Find best path from all paths received [1 path received for 1 goal]\n if True in destination_reached:\n # Best path, would the shortest path in case of goal is reachable \n best_path = [path_list[i] for i in range(len(path_list)) if (destination_reached[i] == True) and (len(path_list[i]) == min(path_lengths))]\n action_order.extend(best_path)\n else:\n # otherwise it would be the longest path, how far traveled before being blocked \n longest_path = [path_list[i] for i in range(len(path_list)) if (len(path_list[i]) == max(path_lengths))]\n action_order.extend(longest_path)\n\n # Compute action sequence for best path\n movements = {\n (0, 3): \"forward-3x\", \n (0, 2): \"forward-2x\", \n (0, 1): \"forward\", \n (-1, 1): \"left\", \n (1, 1): \"right\", \n (0, 0): None}\n try:\n action_sequence = [movements[(action_order[0][i+1][0] - action_order[0][i][0], action_order[0][i+1][1] - action_order[0][i][1])] for i in range(len(action_order[0])-1)]\n except:\n action_sequence = [None]\n # return action sequence\n return action_sequence", "def move(self, state):\n result = None\n self.currentDepthLimit = 0\t\n\tself.transposition = {}\n\tself.counter = 0\n\n\twhile True:\n u = float(\"inf\")\n\t v = float(\"-inf\")\n\t self.counter = 0\n\t result = None\n\t self.transposition = {}\n\t for a in state.actions():\n new = self.min_value(state.result(a), float(\"-inf\"), float(\"inf\"),self.currentDepthLimit)\n\t if new > v:\n\t v = new\n\t result = a\n\n\t elif new == v:\n\t if a.index < result.index:\n\t result = a\n\t if self.is_time_up():\n\t return result\n\t \n\t self.currentDepthLimit += 1\n\t \"\"\"If we never use evaluate function, it means all state are terminated, so return whatever the result is\"\"\"\n\t if self.counter == 0:\n\t break\n\t if self.is_time_up():\n \t return result\n\treturn result", "def step(self,inp): ## function responsible for exciting the machine with a SINGLE INPUT VALUE\n (s, o) = self.getNextValues(self.state,inp)\n # will store the state and return the output\n self.state =s\n return o", "def upgrade_outputs(outputs):\n return [upgrade_output(op) for op in outputs]", "def step(self, actions: np.ndarray) -> Tuple[np.ndarray, np.ndarray, bool, str]:\n\n int_actions = [np.where(r == 1)[0][0] for r in np.vstack(actions)]\n for agent in range(self.agents):\n action = 
int_actions[agent]\n valid_move = self.game.submit_move_for_agent(\n agent, constants.ACTIONS[action]\n )\n\n self.turns_count += 1\n done = np.array([False] * self.agents).reshape(1, -1)\n\n if self.game.check_all_arrived() or self.turns_count >= self.max_turns:\n if self.game.check_all_arrived():\n print(\"reached goals after %i \" % self.turns_count)\n done = np.array([True] * self.agents).reshape(1, -1)\n\n rewards = self.compute_reward(done)\n\n if self.use_alternative_states:\n states = self.make_alternative_states()\n else:\n states = self.make_states()\n\n return states, rewards, done, {\"Not Implemented\": \"\"}", "def calculate(self, inputs):\r\n output = inputs\r\n for layer in self.layers:\r\n output = layer.calculate(output)\r\n return output", "def step(self, actions):\n assert (len(actions) == self.num_actions)\n actions = np.around(actions)\n actions = np.clip(actions, 0, 1)\n self.done = self.network.perform_actions(actions)\n self.cur_pos = self._get_current_pos_in_1d()\n self.reward = self.network.get_reward()\n\n return self.cur_pos, self.reward, self.done, {}", "def moveTower(self, height, fromPole, toPole, withPole, moves, peg_num_disks):\n def moveDisk(fromPole, toPole):\n peg_num_disks[fromPole] = peg_num_disks[fromPole] - 1\n moves.append([ fromPole, peg_num_disks[fromPole] ])\n moves.append([ toPole, peg_num_disks[toPole] ])\n peg_num_disks[toPole] = peg_num_disks[toPole] + 1\n return moves, peg_num_disks\n \n if height >= 1:\n moves, peg_num_disks = self.moveTower(height-1, fromPole, withPole, \n toPole, moves, peg_num_disks)\n moves, peg_num_disks = moveDisk(fromPole, toPole)\n moves, peg_num_disks = self.moveTower(height-1, withPole, toPole, \n fromPole, moves, peg_num_disks)\n return moves, peg_num_disks", "def move(self, coordinates, direction):\n pass", "def result(self, state, action):\n \n worker = state[0]\n boxes = state[1]\n move = action[1]\n coord = action[0] \n newBoxes = []\n \n worker = coord\n \n for box in boxes:\n if box == coord:\n newBox = move_coords(box, move)\n newBoxes.append(newBox)\n else:\n newBoxes.append(box)\n \n newState = ((worker), tuple(newBoxes))\n return newState", "def feedforward(self, inputs):\n # hidden activations\n # a_hidden = self.transfer(np.dot(self.w_input, inputs))\n a_hidden = self.transfer(np.dot(inputs, self.w_input))\n \n #a_output = self.transfer(np.dot(self.w_output, a_hidden))\n dots = (np.dot(a_hidden, self.w_output))\n a_output = self.transfer(np.asarray(dots))\n\n return (a_hidden, a_output)" ]
[ "0.62765896", "0.5847411", "0.58237934", "0.5772807", "0.5766127", "0.56973183", "0.56731117", "0.5606184", "0.54958546", "0.5494064", "0.546905", "0.5464436", "0.54533064", "0.5435174", "0.54249644", "0.54240054", "0.5407606", "0.5406141", "0.5403741", "0.5398427", "0.53847957", "0.53847957", "0.5358958", "0.5358958", "0.5331389", "0.53274536", "0.53273636", "0.53269374", "0.53241235", "0.5322898", "0.53221345", "0.53208166", "0.53193396", "0.5300329", "0.52691287", "0.526683", "0.52666223", "0.5245576", "0.5222945", "0.5213934", "0.52117723", "0.5195351", "0.5184884", "0.5184333", "0.5180894", "0.5179374", "0.5175384", "0.51729316", "0.51627064", "0.5162451", "0.5158837", "0.51541317", "0.5151785", "0.51510155", "0.515101", "0.5149751", "0.5148653", "0.5148373", "0.51434535", "0.51357675", "0.51279366", "0.51218843", "0.5116137", "0.51142323", "0.5103357", "0.5093524", "0.50842816", "0.50838464", "0.5079715", "0.5079715", "0.5079715", "0.5061047", "0.5058794", "0.50574017", "0.505399", "0.50427413", "0.5037837", "0.5032568", "0.502873", "0.5028105", "0.5017686", "0.5016376", "0.49971396", "0.4996516", "0.4990034", "0.49842668", "0.49828982", "0.4977093", "0.49643153", "0.49639484", "0.49606422", "0.49590883", "0.49568993", "0.49566707", "0.49393123", "0.49319598", "0.49302948", "0.49250987", "0.49232107", "0.4920065", "0.49141434" ]
0.0
-1
Create source links to github
def linkcode_resolve(domain, info): if domain != 'py' or not info['module']: return None filename = info['module'].replace('.', '/') return "https://github.com/mathcamp/flywheel/blob/%s/%s.py" % (version_data['ref'], filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repo_link(repo):\n return \"https://github.com/\" + repo", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def transform_github_links(app, doctree, fromdocname):\n\n try:\n target_format = app.builder.link_suffix\n except AttributeError:\n # if the builder has no link_suffix, then no need to modify\n # the current links.\n return\n\n source_suffix = app.config.source_suffix\n # Links are either absolute against the repository or relative to\n # the current document's directory. Note that this is not\n # necessarily app.srcdir, which is the documentation root\n # directory. Instead rely on 'source' attribute of doctree to\n # identify the path of the file providing the current doctree\n try:\n doc_path = doctree.attributes['source']\n doc_dir = os.path.dirname(doc_path)\n except KeyError:\n # some doctrees added by other libraries through dynamic\n # generation do not have a source file. Assume paths are\n # relative to the repo.\n doc_dir = \"\"\n\n for node in doctree.traverse(nodes.reference):\n if 'refuri' not in node:\n continue\n if node['refuri'].startswith('http'):\n continue\n\n try:\n link, anchor = node['refuri'].split('#', 1)\n anchor = '#' + anchor\n except ValueError:\n link = node['refuri']\n anchor = ''\n\n if link is None:\n continue\n\n # Replace the suffix with the correct target format file ending,\n # but only if the link ends with both the correct source suffix\n # and refers to a local file.\n for src_suffix in source_suffix:\n if link.endswith(src_suffix):\n # absolute paths are considered relative to repo\n if link.startswith(\"/\"):\n basepath = \"\"\n # relative paths are against the current doctree source path\n else:\n basepath = doc_dir\n if os.path.exists(os.path.join(basepath, link)):\n node['refuri'] = (link[:-len(source_suffix)] + target_format +\n anchor)", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"", "def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)", "def pr_link(repo, id):\n\n return '[#{id}](https://github.com/{repo}/pull/{id})'.format(id=id, repo=repo)", "async def source(self, context):\n await context.channel.send(\"https://github.com/balfroim/TengriBOT\")", "async def github(self, ctx: Message):\n\t\tawait self.send(\n\t\t f\"{ctx.author.mention} ㅤㅤ I'm open-source! 
You can look at my source code here!ㅤ https://github.com/asxlvm/DogeBoss :GitHub:\"\n\t\t)", "async def source(\n self, ctx: Context, *, source_item: SourceConverter = None\n ) -> None:\n if source_item is None:\n embed = discord.Embed(\n title=\"Magoji's Github Repository\",\n description=f\"[Here's the github link!]({GITHUB_REPO_URL})\",\n colour=0x87CEEB,\n )\n await ctx.send(embed=embed)\n return\n embed = self.build_embed(source_item)\n await ctx.send(embed=embed)", "async def github(self, ctx):\n await ctx.send('https://github.com/nick411077/nickcan_bot')", "async def github(self, ctx):\n\n embed = discord.Embed(color=ctx.me.color)\n embed.set_thumbnail(url='https://cdn2.iconfinder.com/data/icons/black-' +\n 'white-social-media/64/social_media_logo_github-512.png')\n embed.add_field(name='🔗 Github Repo',\n value=f'[Klikk her]({self.bot.misc[\"source_code\"]}) for å se den dritt skrevne kildekoden min')\n await Defaults.set_footer(ctx, embed)\n await ctx.send(embed=embed)", "def push_sources():\n ensure_src_dir()\n push_rev = getattr(env, 'push_rev', None)\n if push_rev is None:\n push_rev = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n local(\"git tag -a {0} -m \\\"Tagged for release\\\"\".format(push_rev))\n local(\"git push origin master --tags\")\n\n with cd(SRC_DIR):\n run(\"git pull origin master\")\n run(\"git fetch -t\")\n run(\"git checkout {0}\".format(push_rev))", "def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return True", "def get_url(self):\n return (\n \"https://raw.githubusercontent.com\"\n \"/benoitbryon/django-downloadview\"\n \"/b7f660c5e3f37d918b106b02c5af7a887acc0111\"\n \"/demo/demoproject/download/fixtures/hello-world.txt\"\n )", "def make_link_node(rawtext, app, type, slug, options):\n\n try:\n base = app.config.github_project_url\n if not base:\n raise AttributeError\n if not base.endswith('/'):\n base += '/'\n except AttributeError as err:\n raise ValueError('github_project_url configuration value is not set (%s)' % str(err)) from err\n\n ref = base + type + '/' + slug + '/'\n set_classes(options)\n prefix = \"#\"\n if type == 'pull':\n prefix = \"PR \" + prefix\n node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref,\n **options)\n return node", "def link_resources(ctx):\n\n for resource in RESOURCES:\n\n command = \"ln -s -r -f -T {res}/{resource} {proj}/{resource}\".format(\n res=RESOURCE_DIR,\n proj=PROJECT_DIR,\n resource=resource)\n\n print(\"Running\")\n print(command)\n print(\"-----------------------------\")\n ctx.run(command)", "def __add_gitlinks(self, gitlinks):\n for sha1, path in gitlinks:\n if sha1 == p4gf_const.NULL_COMMIT_SHA1:\n self.__append(\"D 
{}\\n\".format(path))\n else:\n self.__append(\"M 160000 {0} {1}\\n\".format(sha1, path))", "def make_link_node(rawtext, app, type, slug, options):\r\n\r\n try:\r\n base = app.config.github_project_url\r\n if not base:\r\n raise AttributeError\r\n if not base.endswith('/'):\r\n base += '/'\r\n except AttributeError, err:\r\n raise ValueError('github_project_url configuration value is not set (%s)' % str(err))\r\n\r\n ref = base + type + '/' + slug + '/'\r\n set_classes(options)\r\n prefix = \"#\"\r\n if type == 'pull':\r\n prefix = \"PR \" + prefix\r\n node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref,\r\n **options)\r\n return node", "def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def github_url(self):\n return self.github.replace('.git', '')", "def new_repo(req, source, psp_dir, url_helper=None):\n req.content_type = 'text/html'\n repo_dir = req.filename.rsplit('/', 1)[0]\n files = [f for f in os.listdir(repo_dir) if f[-3:] == '.h5']\n top_level = psp.PSP(req, filename=psp_dir+'new_repo.psp')\n top_level.run({'context': req.uri,\n 'files': files})", "def fix_links():\n pass", "def main():\n # Step1: generate htmls\n csv_data_path= \"./frontend/html_template_data/dataset.csv\"\n html_template_path = \"./frontend/html_template_data/template.html\"\n html_save_path = \"./frontend/html_files/\"\n\n generate_htmls(csv_data_path, html_template_path, html_save_path)\n\n # Step2: push htmls to Github\n # push htmls to Github Pages, currently manual.", "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def button_github(args):\n cell_source = args[\"cell_source\"]\n repo = get_arg_or_fail(args[\"user\"], \"repo\", \"<org/name>\")\n branch = args[\"user\"].get(\"branch\", \"master\")\n docs_dir, rel_path = split_doc_path(args[\"path\"])\n\n # Buttons use OSS URLs.\n if str(docs_dir) == \"g3doc/en\":\n docs_dir = pathlib.Path(\"site/en\")\n\n base_url = f\"github.com/{repo}/blob/{branch}\"\n this_url = \"https://\" + str(base_url / docs_dir / rel_path)\n\n if is_button_cell_re.search(cell_source) and cell_source.find(this_url) != -1:\n return True\n else:\n fail(\n f\"GitHub button URL doesn't match: {this_url}\",\n fix=fix.regex_between_groups_replace_all,\n fix_args=[r\"(href.*)http.*?(\\\\\\\".*GitHub-Mark-32px.png)\", this_url])", "def replace_local_hyperlinks(\n text,\n base_url=\"https://github.com/project-rig/nengo_spinnaker/blob/master/\"\n ):\n def get_new_url(url):\n return base_url + url[2:]\n\n # Deal with anonymous URLS\n for match in re.finditer(r\"^__ (?P<url>\\./.*)\", text, re.MULTILINE):\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = re.sub(\"^__ {}\".format(orig_url),\n \"__ {}\".format(url), text, flags=re.MULTILINE)\n\n # Deal with named URLS\n for match in re.finditer(r\"^\\.\\. _(?P<identifier>[^:]*): (?P<url>\\./.*)\",\n text, re.MULTILINE):\n identifier = match.groupdict()[\"identifier\"]\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = re.sub(\n \"^\\.\\. _{}: {}\".format(identifier, orig_url),\n \".. _{}: {}\".format(identifier, url),\n text, flags=re.MULTILINE)\n\n # Deal with image URLS\n for match in re.finditer(r\"^\\.\\. 
image:: (?P<url>\\./.*)\",\n text, re.MULTILINE):\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = text.replace(\".. image:: {}\".format(orig_url),\n \".. image:: {}\".format(url))\n\n return text", "def _create_links(self):\n for line in self.iter_files_to_install():\n arcname, link = line.split()\n if link == 'False':\n continue\n self.files.append(create_link(arcname, link, self.prefix))", "def getProjectURL():", "def make_links(self):\n for filepath in list(self):\n self.make_link(filepath)", "def _generate_links(self):\n index = 0\n links = \"\"\n for ch in self.text:\n if ch == '[':\n links += \"(^\"\n elif ch == ']':\n links += \")$|\"\n index += 1\n elif links[-1:] != '|' and links != \"\":\n links += ch\n self.links = compile(links[:-1].lower())", "def gen_links(text):\n return []", "def github_setup(request, integration_test_setup):\n repo_owner_type = request.param[0]\n repo_type = request.param[1]\n git_command = request.param[2]\n configholder = request.param[3]\n target = request.param[4]\n return get_github_repos(\n repo_owner_type, repo_type, git_command, configholder, target\n )", "def repository_create_hosted():\n pass", "def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag", "def get_links(match_set, sha_validation=validate_sha_github):\n links = []\n for ticket in match_set.tickets:\n links.append(ticket_url % ticket)\n for PR in match_set.github_PRs:\n links.append(github_PR_url % PR)\n\n # validate github changeset SHA's\n for c in match_set.github_changesets:\n if sha_validation and sha_validation(c):\n links.append(github_changeset_url % c)\n\n return links", "def links(self):\r\n return links.RepoLinks(self)", "def create_links(self, name):\n for target, linknames in self._link_map.iteritems():\n for linkname in linknames:\n self._api.path.mock_copy_paths(target, linkname)\n self._api.python(\n name,\n self._resource,\n args = [\n '--link-json',\n self._api.json.input({str(target) : linkname\n for target, linkname in self._link_map.iteritems()\n }),\n ],\n infra_step=True)", "def addPRLinks(sourcesDF, prData):\n def addPRLinkToPaperRow(row):\n prLinks = []\n doi = str(row.doi)\n if len(doi) < 1:\n return\n else:\n for PR in prData:\n # TODO: use manubot to keep all DOIs consistent so that there will be no issues with short DOIs not matching up. 
(here and elsewhere)\n if doi in PR[\"diff\"]:\n prLinks.append(PR[\"pull_request_link\"])\n prLinksString = \",\".join(prLinks)\n return prLinksString\n\n sourcesDF[\"gh_pull_request_links\"] = sourcesDF.apply(addPRLinkToPaperRow, axis=1)\n return sourcesDF", "def linkAssets(des, Xrc):\n with open(des, 'r') as f:\n body = f.read()\n f.close()\n with open(des, 'w') as f:\n body = body.replace(\"custom.css\", \"\\\\\" + Xrc[\"gh_repo_name\"] + \"/Assets\" + \"/css\" + \"/custom.css\")\n f.write(body)\n f.close()\n ccc.success(\"linking assets to \" + des)", "def reposetup(ui, repo, **kwargs):\n if len(getattr(repo, \"changelog\", [])) == 0:\n return\n hggit_reposetup(ui, repo, **kwargs)\n bb = \"ssh://[email protected]/\"\n for pathname, path in ui.configitems(\"paths\"):\n if path.startswith(bb):\n user, project = path.replace(bb, \"\").split(\"/\", 1)\n # Strip slash and everything after it,\n # such as mq patch queue path.\n project = project.split(\"/\")[0]\n for k, v in ui.configitems(\"github\"):\n if k == \"username\":\n user = v\n gh_path = \"git+ssh://[email protected]/%s/%s.git\" % (user, project)\n if pathname == \"default\":\n if \"master\" not in repo._bookmarks:\n from mercurial.commands import bookmark\n bookmark(ui, repo, \"master\", rev=\"default\")\n gh_pathname = \"github\"\n else:\n gh_pathname = \"github-\" + pathname\n ui.setconfig(\"paths\", gh_pathname, gh_path)", "def add(name, url):\n click.echo(\"registered repo {} at url {}\".format(name, url))", "def data_import_links(self):\n dirpath = os.path.join(config[\"src_dir\"], config[\"data_subdir\"])\n assert os.path.exists(dirpath), f\"- data subdirectory {dirpath} was not found\"\n data = [f for f in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath, f))\n and not f.startswith('.') and f.endswith('.csv') or f.endswith('.txt')]\n data = filter(lambda f: any([re.search(f, cell.source) for cell in self.content.cells]), data)\n return [(os.path.join(config[\"data_subdir\"], f), f\"{config['github_pages_url']}/data/{f}\") for f in data]", "def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]", "def _create_symlink(self, source_path, main):\n main_file = os.path.realpath(os.path.join(source_path, main))\n if not os.path.isfile(main_file):\n main_file += '.js'\n if not os.path.isfile(main_file):\n print('\\tWARNING: Could not create symlink for {}, no such file.'.format(main_file))\n return\n main_file_name = os.path.basename(main_file)\n with change_working_directory(os.path.realpath(self.symlink_dir)) as cd:\n file_path = os.path.join(cd, main_file_name)\n self.created(file_path)\n if os.path.islink(file_path):\n os.remove(file_path)\n symlink(main_file, main_file_name)", "def figure_links(self):\n dirpath = os.path.join(config[\"src_dir\"], config[\"figures_subdir\"])\n assert os.path.exists(dirpath), f\"- figures subdirectory {dirpath} was not found\"\n figures = [f for f in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath, f))\n and not f.startswith('.') and not f.endswith('.tex') and not f.endswith('.pdf')]\n figures = filter(lambda f: any([re.search(f, cell.source) for cell in self.content.cells]), figures)\n return [(os.path.join(config[\"figures_subdir\"], figure), f\"{config['github_pages_url']}/figures/{figure}\") for figure in figures]", "def append_links(self, lines, lang):\n lines.append(\"verbatim &nbsp;\")\n lines.append(\"section Links\")\n lines.append(\"external http://polcasaglia.blogspot.com Blog\")\n lines.append(\"external http://www.uisp-fe.it/calcio.php 
UISP\" )\n lines.append(\"verbatim &nbsp;\")\n return lines", "def get_github_student_url(netid):\n url = 'https://raw.githubusercontent.com/CT-CS5356-Fall2017/cs5356/master/README.md'\n r = requests.get(url)\n assert r.ok\n text = r.text\n for l in text.split('\\n'):\n if netid in l:\n return extract_netid_and_url(l)\n return None, None, None", "def makeLinks(self):\n self.deleteIndexFileIfExists()\n _fileNames = self.getHTMLFileNames()\n _msgPart1 = \"<a href=\\\"\"\n _msgPart2 = \"\\\" target=\\\"loadHTMLResults\\\">\"\n _msgPart3 = \"</a><br>\"\n _link = \"\"\n for _fileName in _fileNames:\n _origFileName = _fileName\n _linkName = _fileName.split('.')[0]\n _createAnchorTag = (_msgPart1+str(_origFileName)+_msgPart2+str(_linkName)+_msgPart3)\n _link = _link + _createAnchorTag\n return _link", "async def github(self, ctx: commands.Context, *, path: str):\n user, _, repo = path.replace(' ', '/', 1).partition('/')\n if repo:\n async with self.bot.session.get(\n f\"https://api.github.com/repos/{user}/{repo}\",\n headers={\"Authorization\": f\"token {self.config['token']}\"}\n ) as r:\n data = await r.json()\n embed = discord.Embed(\n title=data['full_name'],\n description=f\"stars: {data['stargazers_count']} forks: {data['forks_count']}\\n\"\n f\"language: {data['language']} license: {data['license']['name'] if data['license'] else 'no'}\\n\"\n +(f\"homepage: {data['homepage']}\" if data['homepage'] else ''),\n url=data['html_url']\n ).set_author(\n name=data['owner']['login'],\n url=data['owner']['html_url'],\n icon_url=data['owner']['avatar_url']\n ).set_thumbnail(\n url=data['owner']['avatar_url']\n ).add_field(\n name=\"Description\",\n value=data['description']\n )\n await ctx.send(embed=embed)\n else:\n async with self.bot.session.get(\n f\"https://api.github.com/users/{user}\",\n headers={\"Authorization\": f\"token {self.config['token']}\"}\n ) as r:\n data = await r.json()\n embed = discord.Embed(\n title=f\"{data['name']} ({data['login']})\",\n description=f\"repos: {data['public_repos']} gists: {data['public_gists']}\\n\"\n f\"followers: {data['followers']} following: {data['following']}\\n\"\n f\"location: {data['location']}\",\n url=data['html_url']\n ).set_thumbnail(\n url=data['avatar_url']\n ).add_field(\n name=\"Bio\",\n value=data['bio']\n ).add_field(\n name=\"Contact\",\n value=''.join([\n (f\"email: [{data['email']}](mailto:{data['email']})\\n\" if data['email'] else ''),\n (f\"twitter: [{data['twitter_username']}](https://twitter.com/{data['twitter_username']})\\n\" if data['twitter_username'] else ''),\n (f\"company: {data['company']}\\n\" if data['company'] else ''),\n \n ]) or 'no contact avalible'\n ).set_footer(\n text=f\"id: {data['id']}\"\n )\n await ctx.send(embed=embed)", "def _makeSymlink ( target, source, env ) :\n if len(target) != 1 :\n fail ( \"unexpected number of targets for symlink: \"+str(target) )\n if len(source) != 1 :\n fail ( \"unexpected number of sources for symlink: \"+str(source) )\n\n target = str(target[0])\n source = str(source[0].abspath)\n trace ( \"Executing symlink `%s' -> `%s'\" % ( target, source ), \"makeSymlink\", 3 )\n\n os.symlink ( source, target )", "def get_pretty_links(self, source):\n data = self.get_links(source)\n to_return = []\n for ind in data:\n if ind == '':\n continue\n if len(ind[2]) > 300:\n ind[2] = ind[2][:297] + '...'\n to_return.append([ind[1], ind[2], ind[3][0], ind[3][1]])\n if source == 'twitter':\n to_return[-1].append(ind[3][2])\n return to_return", "def test_github_file_exists(self):\n for h in 
self.hyperlinks:\n if h['url'].startswith('https://github.com/cyberbotics/webots/tree/released'):\n path = h['url'].replace('https://github.com/cyberbotics/webots/tree/released',\n os.path.normpath(os.environ['WEBOTS_HOME']))\n self.assertTrue(\n os.path.isfile(path) or os.path.isdir(path),\n msg='Hyperlink \"%s\" is pointing to a non-existing file or directory \"%s\" (in file \"%s\").' %\n (h['md'], path, h['file'])\n )", "def make_franny_symlinks(src_dirs, out_dir):\n\n for path, dirs, files in chain.from_iterable(os.walk(path)\n for path in src_dirs):\n print('Looking in %s' % path)\n for sta in ['NS12', 'NS13', 'NS14']:\n for filename in fnmatch.filter(files, '*.%s*' % sta):\n net = filename.split('.')[-7]\n chan = filename.split('.')[-4]\n if chan[-1] == 'N':\n new_chan = 'EH1'\n elif chan[-1] == 'E':\n new_chan = 'EH2'\n else:\n continue\n mseed_nm = filename.split('/')[-1]\n new_mseed = string.replace(mseed_nm, chan, new_chan)\n old_path = os.path.join(path, filename)\n new_path = '%s/%s/%s/%s.D/%s' % (out_dir, net,\n sta, new_chan, new_mseed)\n\n print('Creating symlink for file %s at %s'\n % (old_path, new_path))\n spwd = '*blackmore89'\n cmnd = 'sudo -S ln %s %s' % (old_path, new_path)\n os.system('echo %s | %s' % (spwd, cmnd))\n return", "def add_source_achors():\n pass", "async def _botsource(self, ctx):\r\n source_link = \"https://github.com/Simalary/SimsVIP.Servo\"\r\n await self.bot.say('{0.message.author.mention}, my source code is available at <{1}>.'.format(ctx, source_link))", "def extract_links(self, source: str) -> List[str]:\n return self.links_compiled_exp.findall(source)", "def generate_links():\n start_url = 'https://twigserial.wordpress.com/'\n base_url = start_url + 'category/story/'\n tree = etree.HTML(requests.get(start_url).text)\n xpath = './/*/option[@class=\"level-2\"]/text()'\n return [base_url + suffix.strip() for suffix in tree.xpath(xpath)]", "def fix_git_symlinked(src, dst):\n # if running from WC there should be a 'doc' dir sibling to nikola package\n if not should_fix_git_symlinked():\n return\n # probabbly in a WC, so symlinks should be fixed\n for root, dirs, files in os.walk(dst):\n for name in files:\n filename = os.path.join(root, name)\n\n # detect if symlinked\n try:\n if not (2 < os.path.getsize(filename) < 500):\n continue\n # which encoding uses a git symlink marker ? 
betting on default\n with open(filename, 'r') as f:\n text = f.read()\n if text[0] != '.':\n # de facto hint to skip binary files and exclude.meta\n continue\n except Exception:\n # probably encoding: content binary or encoding not defalt,\n # also in py2.6 it can be path encoding\n continue\n dst_dir_relpath = os.path.dirname(os.path.relpath(filename, dst))\n path = os.path.normpath(os.path.join(src, dst_dir_relpath, text))\n if not os.path.exists(path):\n continue\n # most probably it is a git symlinked file\n\n # copy original content to filename\n shutil.copy(path, filename)", "def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )", "def source_release(request, new_package):\n\n new_module, pkg_root = new_package\n source_label = random_str(40)\n source_url = \"http://{}.com/{}\".format(random_str(7), random_str(12))\n with open(os.path.join(new_module, META_NAME), \"w\") as openmeta:\n openmeta.write((\n '{{\"packages\": [\"find_packages()\"], \"source_label\": \"{}\", '\n '\"source_url\": \"{}\"}}'\n ).format(source_label, source_url))\n\n request.addfinalizer(module_cleanup)\n return new_module, source_label, source_url", "def LinkFiles(self, srcdir, target):\n if '+orig' in target:\n tgt_prefix = target.replace('.BRIK','')\n tgt_prefix = tgt_prefix.replace('.HEAD','')\n linkfiles = ['%s.HEAD'%tgt_prefix, '%s.BRIK' %tgt_prefix]\n else:\n linkfiles = [target]\n for linkfile in linkfiles:\n linkname = '%s/%s' % (srcdir, os.path.basename(linkfile))\n rel_linkdir = abspath_to_relpath(os.path.dirname(target), srcdir)\n rel_linkfile = '%s/%s' % (rel_linkdir, os.path.basename(linkfile))\n if not os.path.exists(linkname) and not os.path.islink(linkname):\n cmd = 'cd %s && ln -s %s %s' % (srcdir, rel_linkfile, linkname)\n self.ExecCmd(cmd)", "def link_origin_doc(link_source, links, redistribution='none', zip_file=None):\n ldoc = link_doc(link_source, 'origin', links)\n ldoc['redistribution'] = redistribution\n if zip_file:\n ldoc['zip_file'] = zip_file\n return ldoc", "async def source(self, ctx, *, command: str = None):\n\n source_url = 'https://github.com/Discord-Bots-Italia/public-bot-py'\n branch = 'master'\n\n if command is None:\n return await ctx.send(source_url)\n\n else:\n obj = self.bot.get_command(command.replace('.', ' '))\n if obj is None:\n return await ctx.send('Could not find command.')\n\n # since we found the command we're looking for, presumably anyway, let's\n # try to access the code itself\n src = obj.callback.__code__\n module = obj.callback.__module__\n filename = src.co_filename\n\n lines, firstlineno = inspect.getsourcelines(src)\n location = os.path.relpath(filename).replace('\\\\', '/')\n\n final_url = f'<{source_url}/blob/{branch}/{location}#L{firstlineno}-L{firstlineno + len(lines) - 1}>'\n await ctx.send(final_url)", "def git_project(soup, github_user, github_pass, github_repo, github_name):\n giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(\n user=github_user, password=github_pass, repo=github_repo\n )\n oldcwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n gitdir = os.path.join(tmpdir, github_repo)\n cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(gitdir)\n rhinoscrape(soup, github_user, github_name)\n cmd = 'git add .'\n subprocess.run(shlex.split(cmd), check=False)\n msg = 'Project committed by Rhino Repo'\n cmd = 'git commit -m {}'.format(shlex.quote(msg))\n 
subprocess.run(shlex.split(cmd), check=False)\n cmd = 'git push {}'.format(shlex.quote(giturl))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(oldcwd)\n shutil.rmtree(tmpdir, ignore_errors=True)", "def test_link_to_source(\n self,\n _needs_unindent,\n _is_source_requested,\n _get_source_code_from_object,\n ):\n _needs_unindent.return_value = False\n _is_source_requested.return_value = True\n _get_source_code_from_object.return_value = \"\"\n\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"basic.html\",\n ),\n \"MyKlass.get_method\",\n )\n content = self._get_fake_project_method()\n nodes = self._get_nodes(data, content) # pylint: disable=no-value-for-parameter\n\n self.assertEqual(2, len(nodes))\n self.assertTrue(any(node for node in nodes if isinstance(\n node,\n extension._SourceCodeHyperlink, # pylint: disable=protected-access\n )))", "def source():\n\n source = models.Source(name=u\"Joe's Funerals.com\", url=u\"http://www.joesfunerals.com\")\n return source", "def cleanup_links(path, inspect_links=False):\n with open(path) as f:\n text = f.read()\n\n# if 'BokehJS does not appear to have successfully loaded' in text:\n# for k, v in BOKEH_REPLACEMENTS.items():\n# text = text.replace(k, v)\n\n text = component_links(text, path)\n soup = BeautifulSoup(text, features=\"html.parser\")\n for a in soup.findAll('a'):\n href = a.get('href', '')\n if '.ipynb' in href and 'http' not in href:\n # for k, v in LINK_REPLACEMENTS.items():\n # href = href.replace(k, v)\n a['href'] = href.replace('.ipynb', '.html')\n\n # check to make sure that path exists, if not, try un-numbered version\n try_path = os.path.join(os.path.dirname(path), a['href'])\n if not os.path.exists(try_path):\n num_name = os.path.basename(try_path)\n name = re.split(r\"^\\d+( |-|_)\", num_name)[-1]\n new_path = try_path.replace(num_name, name)\n if os.path.exists(new_path):\n a['href'] = os.path.relpath(new_path, os.path.dirname(path))\n else:\n also_tried = 'Also tried: {}'.format(name) if name != num_name else ''\n warnings.warn('Found missing link {} in: {}. {}'.format(a['href'], path, also_tried))\n\n if inspect_links and 'http' in a['href']:\n print(a['href'])\n for img in soup.findAll('img'):\n src = img.get('src', '')\n if 'http' not in src and 'assets' in src:\n try_path = os.path.join(os.path.dirname(path), src)\n if not os.path.exists(try_path):\n also_tried = os.path.join('..', src)\n if os.path.exists(os.path.join(os.path.dirname(path), also_tried)):\n img['src'] = also_tried\n else:\n warnings.warn('Found reference to missing image {} in: {}. 
Also tried: {}'.format(src, path, also_tried))\n with open(path, 'w') as f:\n f.write(str(soup))", "def symlink():\n releases()\n env.current_path = '/root/your_project/current'\n run('rm %(current_path)s' % env)\n run('ln -s %(current_release)s %(current_path)s' % env)", "def to_sources(todos):\n for subtodos in todos.iter_sourced():\n to_path(subtodos, subtodos.get_source())", "def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"", "async def _get_source_responses(self, *urls: URL) -> SourceResponses:\n # First, get the project info so we can use the web url as landing url\n responses = await super()._get_source_responses(*urls)\n # Then, collect the commits\n responses.extend(await self.__get_commits_recursively(str(self._parameter(\"file_path\", quote=True))))\n return responses", "def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:\n # Default to main branch\n url = f\"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}\"\n if lnum:\n url += f\"#L{lnum}\"\n return url", "def test_fix_repo_url():\n repo_url_git = 'git://github.com/Tinche/bower-cache'\n repo_url_https = 'https://github.com/Tinche/bower-cache'\n fixed_url_https = 'https://:@github.com/Tinche/bower-cache'\n assert repo_url_git == gitwrapper._fix_repo_url(repo_url_git)\n assert fixed_url_https == gitwrapper._fix_repo_url(repo_url_https)", "def _repos(repos_names):\n for rn in repos_names:\n # assuming repos_names is either a list of full paths\n # or folders in ~/src\n if \"/\" not in rn:\n rn = f\"{env.SRC}/{rn}\"\n yield git.Repo(rn)", "def get_contribution_links(type, standardized_name, namespaces_and_ids, cycle):\n \n ids = dict([(item['namespace'], item['id']) for item in namespaces_and_ids])\n if cycle == '-1':\n cycle = None\n\n links = [\n dict(text='OpenSecrets.org', url=_get_crp_url(type, standardized_name, ids, cycle)),\n dict(text='FollowTheMoney.org', url=_get_nimsp_url(type, standardized_name, ids, cycle)),\n dict(text='TransparencyData.com', url=_get_td_url(type, standardized_name, ids, cycle)),\n ]\n \n links = filter(lambda link: link['url'] is not None, links)\n\n return links", "def _urls(*, repository, commit, mirrors):\n result_with_nulls = [\n _format_url(\n pattern = x,\n repository = repository,\n commit = commit,\n )\n for x in mirrors.get(\"github\")\n ]\n return [\n url\n for url in result_with_nulls\n if url != None\n ]", "def on_files(self, files, config, **kwargs):\n linked_md_file = File(\n path=self.config[\"path_to_file\"],\n src_dir=self.config[\"path_to_src_dir\"],\n dest_dir=config[\"site_dir\"],\n use_directory_urls=config[\"use_directory_urls\"]\n )\n files.append(linked_md_file)\n return files", "def gen_url(section):\n urls = []\n urls.append('https://ia800500.us.archive.org/22/items/stackexchange/' + section + '.stackexchange.com.7z')\n urls.append('https://ia800500.us.archive.org/22/items/stackexchange/' + section + '.7z')\n return urls", "def urlrepos(prefix, roothead, paths):\n for path in paths:\n path = os.path.normpath(path)\n yield (prefix + '/' +\n util.pconvert(path[len(roothead):]).lstrip('/')).strip('/'), path", "def git():\n pass", "def cli(ctx, url):\n for u in url:\n m = re.fullmatch(\n r\"(?:https?://)?(?:www\\.)?github\\.com\"\n r\"/(?P<owner>[^/]+)\"\n r\"/(?P<repo>[^/]+)\"\n r\"/(?:issues|pull)\"\n r\"/(?P<issue>\\d+)\"\n r\"(?:#issuecomment-(?P<comment>\\d+))?\",\n u,\n )\n if not m:\n click.echo(f\"{ctx.command_path}: could not parse {u!r}\", 
err=True)\n continue\n endpoint = ctx.obj.repos[m.group(\"owner\")][m.group(\"repo\")].issues\n if m.group(\"comment\") is None:\n endpoint = endpoint[m.group(\"issue\")].reactions\n else:\n endpoint = endpoint.comments[m.group(\"comment\")].reactions\n endpoint.post(json={\"content\": \"+1\"})", "def get_github_commits():\n utcnow = datetime.datetime.utcnow()\n yesterday = utcnow - datetime.timedelta(hours=24)\n yesterday = yesterday.replace(hour=12, minute=0, second=0)\n iso = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n txt = [\"> IEM Code Pushes <to branch> on Github\\n\"]\n html = [\"<h3>IEM Code Pushes &lt;to branch&gt; on Github</h3>\"]\n\n # get branches, main is first!\n branches = [\"main\"]\n req = exponential_backoff(requests.get, IEM_BRANCHES, timeout=30)\n for branch in req.json():\n if branch[\"name\"] == \"main\":\n continue\n branches.append(branch[\"name\"])\n\n hashes = []\n links = []\n for branch in branches:\n uri = (\n f\"https://api.github.com/repos/akrherz/iem/commits?since={iso}&\"\n f\"sha={branch}\"\n )\n req2 = exponential_backoff(requests.get, uri, timeout=30)\n # commits are in reverse order\n for commit in req2.json()[::-1]:\n if commit[\"sha\"] in hashes:\n continue\n hashes.append(commit[\"sha\"])\n timestring = commit[\"commit\"][\"author\"][\"date\"]\n utcvalid = datetime.datetime.strptime(\n timestring, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n valid = utcvalid.replace(tzinfo=pytz.utc).astimezone(\n pytz.timezone(\"America/Chicago\")\n )\n data = {\n \"stamp\": valid.strftime(\"%b %-d %-2I:%M %p\"),\n \"msg\": commit[\"commit\"][\"message\"],\n \"htmlmsg\": htmlize(commit[\"commit\"][\"message\"])\n .replace(\"\\n\\n\", \"\\n\")\n .replace(\"\\n\", \"<br />\\n\"),\n \"branch\": branch,\n \"url\": commit[\"html_url\"][:-20], # chomp to make shorter\n \"i\": len(links) + 1,\n }\n links.append(\"[%(i)s] %(url)s\" % data)\n txt.append(\n mywrap(\" %(stamp)s[%(i)s] <%(branch)s> %(msg)s\" % data)\n )\n html.append(\n (\n '<li><a href=\"%(url)s\">%(stamp)s</a> '\n \"&lt;%(branch)s&gt; %(htmlmsg)s</li>\\n\"\n )\n % data\n )\n\n if len(txt) == 1:\n txt = txt[0] + \" No code commits found in previous 24 Hours\"\n html = html[0] + (\n \"<strong>No code commits found \" \"in previous 24 Hours</strong>\"\n )\n else:\n txt = \"\\n\".join(txt) + \"\\n\\n\" + \"\\n\".join(links)\n html = html[0] + \"<ul>\" + \"\\n\".join(html[1:]) + \"</ul>\"\n\n return txt + \"\\n\\n\", html + \"<br /><br />\"", "def create_urls(years):\n urls = []\n for year in years:\n url = f\"http://billboardtop100of.com/{year}-2/\"\n urls.append(url)\n return urls", "def parse_link(self,data,api):\n return REACT_API_DOCS_URL + data.FILE.split('/')[1] + api.find('a',attrs = {'class': 'hash-link'}).attrs['href']", "def generate_links(wiki, page, tags):\n wiki.generate_tag_links(page, tags)", "def _post_src_install_soname_symlinks(mysettings, out):\n\n\timage_dir = mysettings[\"D\"]\n\tneeded_filename = os.path.join(mysettings[\"PORTAGE_BUILDDIR\"],\n\t\t\"build-info\", \"NEEDED.ELF.2\")\n\n\tf = None\n\ttry:\n\t\tf = io.open(_unicode_encode(needed_filename,\n\t\t\tencoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='r', encoding=_encodings['repo.content'],\n\t\t\terrors='replace')\n\t\tlines = f.readlines()\n\texcept IOError as e:\n\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\traise\n\t\treturn\n\tfinally:\n\t\tif f is not None:\n\t\t\tf.close()\n\n\tqa_no_symlink = \"\"\n\tf = None\n\ttry:\n\t\tf = 
io.open(_unicode_encode(os.path.join(\n\t\t\tmysettings[\"PORTAGE_BUILDDIR\"],\n\t\t\t\"build-info\", \"QA_SONAME_NO_SYMLINK\"),\n\t\t\tencoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='r', encoding=_encodings['repo.content'],\n\t\t\terrors='replace')\n\t\tqa_no_symlink = f.read()\n\texcept IOError as e:\n\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\traise\n\tfinally:\n\t\tif f is not None:\n\t\t\tf.close()\n\n\tqa_no_symlink = qa_no_symlink.split()\n\tif qa_no_symlink:\n\t\tif len(qa_no_symlink) > 1:\n\t\t\tqa_no_symlink = \"|\".join(\"(%s)\" % x for x in qa_no_symlink)\n\t\t\tqa_no_symlink = \"^(%s)$\" % qa_no_symlink\n\t\telse:\n\t\t\tqa_no_symlink = \"^%s$\" % qa_no_symlink[0]\n\t\tqa_no_symlink = re.compile(qa_no_symlink)\n\n\tlibpaths = set(portage.util.getlibpaths(\n\t\tmysettings[\"ROOT\"], env=mysettings))\n\tlibpath_inodes = set()\n\tfor libpath in libpaths:\n\t\tlibdir = os.path.join(mysettings[\"ROOT\"], libpath.lstrip(os.sep))\n\t\ttry:\n\t\t\ts = os.stat(libdir)\n\t\texcept OSError:\n\t\t\tcontinue\n\t\telse:\n\t\t\tlibpath_inodes.add((s.st_dev, s.st_ino))\n\n\tis_libdir_cache = {}\n\n\tdef is_libdir(obj_parent):\n\t\ttry:\n\t\t\treturn is_libdir_cache[obj_parent]\n\t\texcept KeyError:\n\t\t\tpass\n\n\t\trval = False\n\t\tif obj_parent in libpaths:\n\t\t\trval = True\n\t\telse:\n\t\t\tparent_path = os.path.join(mysettings[\"ROOT\"],\n\t\t\t\tobj_parent.lstrip(os.sep))\n\t\t\ttry:\n\t\t\t\ts = os.stat(parent_path)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tif (s.st_dev, s.st_ino) in libpath_inodes:\n\t\t\t\t\trval = True\n\n\t\tis_libdir_cache[obj_parent] = rval\n\t\treturn rval\n\n\tmissing_symlinks = []\n\n\t# Parse NEEDED.ELF.2 like LinkageMapELF.rebuild() does.\n\tfor l in lines:\n\t\tl = l.rstrip(\"\\n\")\n\t\tif not l:\n\t\t\tcontinue\n\t\tfields = l.split(\";\")\n\t\tif len(fields) < 5:\n\t\t\tportage.util.writemsg_level(_(\"\\nWrong number of fields \" \\\n\t\t\t\t\"in %s: %s\\n\\n\") % (needed_filename, l),\n\t\t\t\tlevel=logging.ERROR, noiselevel=-1)\n\t\t\tcontinue\n\n\t\tobj, soname = fields[1:3]\n\t\tif not soname:\n\t\t\tcontinue\n\t\tif not is_libdir(os.path.dirname(obj)):\n\t\t\tcontinue\n\t\tif qa_no_symlink and qa_no_symlink.match(obj.strip(os.sep)) is not None:\n\t\t\tcontinue\n\n\t\tobj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))\n\t\tsym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)\n\t\ttry:\n\t\t\tos.lstat(sym_file_path)\n\t\texcept OSError as e:\n\t\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\t\traise\n\t\telse:\n\t\t\tcontinue\n\n\t\tmissing_symlinks.append((obj, soname))\n\n\tif not missing_symlinks:\n\t\treturn\n\n\tqa_msg = [\"QA Notice: Missing soname symlink(s):\"]\n\tqa_msg.append(\"\")\n\tqa_msg.extend(\"\\t%s -> %s\" % (os.path.join(\n\t\tos.path.dirname(obj).lstrip(os.sep), soname),\n\t\tos.path.basename(obj))\n\t\tfor obj, soname in missing_symlinks)\n\tqa_msg.append(\"\")\n\tfor line in qa_msg:\n\t\teqawarn(line, key=mysettings.mycpv, out=out)", "def parse_dependency_links(*filenames):\n dependency_links = []\n for f in filenames:\n for line in open(f, 'r').read().split('\\n'):\n if re.match(r'\\s*-[ef]\\s+', line):\n line = re.sub(r'\\s*-[ef]\\s+', '', line)\n line = re.sub(r'\\s*git\\+https', 'http', line)\n line = re.sub(r'\\.git#', '/tarball/master#', line)\n dependency_links.append(line)\n return dependency_links", "async def source(self, ctx, command: str = None):\n\n source_url = \"https://github.com/Zeniath/Non-Don-Tools\"\n if command is None:\n return 
await ctx.send(source_url)\n \n\n obj = self.bot.get_command(command.replace('.', ' '))\n if obj is None:\n return await ctx.send('Could not find command.')\n\n src = obj.callback.__code__\n lines, firstlineno = inspect.getsourcelines(src)\n if not obj.callback.__module__.startswith('discord'):\n location = os.path.relpath(src.co_filename).replace('\\\\', '/')\n else:\n location = obj.callback.__module__.replace('.', '/') + '.py'\n source_url = \"https://github.com/Zeniath/Non-Don-Tools\"\n\n await ctx.send(f\"<{source_url}/tree/master/{location}/#L{firstlineno}-L{firstlineno + len(lines) - 1}>\")", "def source_repo_url(branch_url_mode, vcs, source_repo, source_repo_branch):\n return {\n 'short': source_repo_branch,\n 'medium': '{source_repo.strpath}#{source_repo_branch}'.format(**locals()),\n 'long': '{vcs}+{source_repo.strpath}#{source_repo_branch}'.format(**locals())\n }[branch_url_mode]", "def _make_url(self):\n ...", "def to_projectlink(self):\n\n thumb_image_url = reverse('project_serve_file', args=[self.short_name,self.logo])\n\n args = {\"abreviation\":self.short_name,\n \"title\":self.short_name,\n \"description\":self.description,\n \"URL\":reverse('comicsite.views.site', args=[self.short_name]),\n \"download URL\":\"\",\n \"submission URL\":self.get_submission_URL(),\n \"event name\":self.event_name,\n \"year\":\"\",\n \"event URL\":self.event_url,\n \"image URL\":self.logo,\n \"thumb_image_url\":thumb_image_url,\n \"website section\":\"active challenges\",\n \"overview article url\":self.publication_url,\n \"overview article journal\":self.publication_journal_name,\n \"overview article citations\":\"\",\n \"overview article date\":\"\",\n \"submission deadline\":\"\",\n \"workshop date\":self.workshop_date,\n \"open for submission\":\"yes\" if self.is_open_for_submissions else \"no\",\n \"data download\":\"yes\" if self.offers_data_download else \"no\",\n \"dataset downloads\":self.number_of_downloads,\n \"registered teams\":\"\",\n \"submitted results\":self.number_of_submissions,\n \"last submission date\":self.last_submission_date,\n \"hosted on comic\":True,\n \"created at\":self.created_at\n }\n\n projectlink = ProjectLink(args)\n return projectlink", "def source_url(self, target_url):\n raise NotImplementedError()", "def link(self, id):\r\n return links.RepoLink(self, id)", "def _docs():\n url = \"https://vanheeringen-lab.github.io/seq2science\"\n if not webbrowser.open(url):\n print(url)", "def generate_pr_link(pr_num):\n return (\n '[PR #{0}](https://github.com/sendgrid/smtpapi-python/pulls/{0})'\n ).format(pr_num)", "async def source(self, ctx):\n \"\"\" Check out my source code <3 \"\"\"\n # Do not remove this command, this has to stay due to the GitHub LICENSE.\n # TL:DR, you have to disclose source according to MIT.\n # Reference: https://github.com/AlexFlipnote/discord_bot.py/blob/master/LICENSE\n await ctx.send(f\"**{ctx.bot.user}** is powered by this source code:\\nhttps://github.com/AlexFlipnote/discord_bot.py With modifications by user: snow-blade\")", "def getURLs():", "def test_apiLinking(self):\n version = \"1.2.3\"\n input, output = self.getArbitraryLoreInputAndOutput(version)\n self.howtoDir.child(\"one.xhtml\").setContent(input)\n\n self.builder.build(version, self.howtoDir, self.howtoDir,\n self.templateFile, \"scheme:apilinks/%s.ext\")\n out = self.howtoDir.child('one.html')\n self.assertIn(\n '<a href=\"scheme:apilinks/foobar.ext\" title=\"foobar\">foobar</a>',\n out.getContent())" ]
[ "0.6921794", "0.68528706", "0.6557395", "0.6166538", "0.6133582", "0.6098029", "0.6074224", "0.6037421", "0.6030705", "0.6019107", "0.59831655", "0.596135", "0.59046274", "0.58846396", "0.58756024", "0.58383036", "0.5835905", "0.5808838", "0.5806285", "0.579724", "0.5761678", "0.5760295", "0.5716394", "0.571193", "0.57084566", "0.57023764", "0.56972474", "0.5683747", "0.567665", "0.5668867", "0.56504357", "0.56172377", "0.5613442", "0.5592097", "0.55839175", "0.5583281", "0.5561512", "0.5557213", "0.5531546", "0.5526724", "0.55101895", "0.55032897", "0.5493666", "0.547989", "0.5462794", "0.5451208", "0.5450061", "0.54320043", "0.54314923", "0.5415937", "0.5413617", "0.5411425", "0.5410771", "0.54030216", "0.5391626", "0.538444", "0.53808296", "0.5364443", "0.53565246", "0.5350616", "0.5345732", "0.53404117", "0.5335644", "0.53336567", "0.53223073", "0.532076", "0.53206587", "0.53159785", "0.5315699", "0.5302396", "0.5298311", "0.52982414", "0.5296738", "0.5296574", "0.52950954", "0.5288943", "0.5280422", "0.5272234", "0.52694404", "0.5261833", "0.5260104", "0.52579373", "0.5256384", "0.52522343", "0.5247123", "0.5247017", "0.52386135", "0.5237558", "0.5235892", "0.5228373", "0.52231085", "0.52168435", "0.52136254", "0.520601", "0.5198363", "0.5193409", "0.518321", "0.5182854", "0.5180805", "0.51773536" ]
0.5238986
86
Accept the path to the text files with lake ice fraction timeseries
def get_ts_from_file(path="", start_year=-np.Inf, end_year=np.Inf) -> pd.DataFrame: df = pd.DataFrame.from_csv(path, sep="\s+") cnames = df.columns[:] for c in cnames: y = int(c) if y < start_year or y > end_year: df.drop(c, axis=1, inplace=True) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_local_20Hz_files(**kwargs):\n pathlst = kwargs.get('pathlst')\n product = kwargs.get('product')\n varalias = kwargs.get('varalias')\n sdate = kwargs.get('sdate')\n edate = kwargs.get('edate')\n twin = kwargs.get('twin')\n\n # establish coords if defined in config file\n timestr = satellite_dict[product]['vardef']['time']\n lonstr = satellite_dict[product]['vardef']['lons']\n latstr = satellite_dict[product]['vardef']['lats']\n\n # adjust start and end\n sdate = sdate - timedelta(minutes=twin)\n edate = edate + timedelta(minutes=twin)\n # get meta data\n ncmeta = ncdumpMeta(pathlst[0])\n ncvar = get_filevarname(varalias, variable_info,\n satellite_dict[product], ncmeta)\n # retrieve sliced data\n ds = read_netcdfs(pathlst)\n ds_sort = ds.sortby(timestr)\n\n # get indices for included time period\n nptime = ds_sort[timestr].data\n print('here0')\n print(len(nptime))\n #dtime = [parse_date(str(nptime[i])) for i in range(len(nptime))]\n print('here1')\n #idx = find_included_times_pd(dtime, sdate=sdate, edate=edate)\n idx = find_included_times_pd(nptime, sdate=sdate, edate=edate)\n print(len(nptime[idx]))\n print('here2')\n dtime = [parse_date(str(nptime[idx][i])) for i in range(len(nptime[idx]))]\n print(dtime)\n print('here3')\n #dtime = list(np.array(dtime)[idx])\n lons = list(((ds_sort[lonstr].data[idx] - 180) % 360) - 180)\n lats = list(ds_sort[latstr].data[idx])\n\n unxt = (nptime[idx].astype(int) / 10**9)\n\n # make dict and start with stdvarname for varalias\n stdvarname = variable_info[varalias]['standard_name']\n vardict = {}\n vardict[stdvarname] = list(ds_sort[ncvar].data[idx])\n vardict['longitude'] = lons\n vardict['latitude'] = lats\n vardict['time'] = unxt\n vardict['datetime'] = dtime\n vardict['time_unit'] = variable_info['time']['units']\n print(vardict.keys())\n return vardict", "def load_data(self):\n\t\ti = 0\n\n\t\tpaths = glob.glob(self.file_path+'/rollout_*')\n\t\tself.rollouts = []\n\n\n\t\tfor path in paths:\n\t\t\tdata_point = np.load(path,encoding='latin1')\n\t\t\tself.rollouts.append(data_point)\n\n\t\treturn paths", "def read_fit_results_rikhav(path,\n feature_names=('pulse integral fit',\n 'amplitude', 'rise time', 'decay time', 'chi2 reduced')\n ):\n\n data_files = sorted(glob.glob(os.path.join(path, '*.npy')))\n if not len(data_files):\n raise ValueError(\"No data files found!\")\n\n result = dict()\n result['type'] = []\n for k in feature_names:\n result[k.replace(' ', '_')] = []\n result['chi2'] = []\n result['t0'] = []\n result['tmax'] = []\n result['integral'] = []\n tl = TimeLine(numcomp=1, function='expflare')\n\n for i, df in enumerate(data_files):\n logging.info(f\"Reading file {df:s}, assigned type: {i}\")\n x = np.load(df, allow_pickle=True).tolist()\n for xi in x.values():\n for k in feature_names:\n result[k.replace(' ', '_')].append(xi[k])\n result['type'].append(i)\n result['chi2'].append(xi['chi2 reduced'] * (xi['data'].size - 4))\n\n result['t0'].append(xi['time'][0])\n result['tmax'].append(xi['time'][-1])\n\n result['integral'].append(tl.integral(0., 100.,\n tstep=1000,\n t0_000=10.,\n tr_000=result['rise_time'][-1] * 1e6, # s to micro s\n td_000=result['decay_time'][-1] * 1e6, # s to micro s\n A_000=-result['amplitude'][-1],\n c=0.)[0]) # integral in (micro s) * V\n if not np.isfinite(result['integral'][-1]):\n result['integral'][-1] = 1e20\n\n del x\n\n for k, v in result.items():\n result[k] = np.array(result[k])\n return result", "def readtext(tpath):\r\n t1=tpath\r\n i = []\r\n #check whether text existed\r\n if 
os.path.exists(t1):\r\n t2=os.path.split(t1)[0]\r\n #print(t2)\r\n with open(tpath) as f:\r\n lines=f.readlines()\r\n for line in lines:\r\n # get all the image names in the text file\r\n line=line.split('\\n')[0]\r\n #generate images' absolute path\r\n line=t2+'/'+line\r\n i.append(line)\r\n f.close()\r\n return i\r\n else:\r\n print(i)\r\n return False", "def readtle(file, catalog=np.zeros(1)):\n # Adapted from Mr. Brett Pantalone's MATLAB readtle function.\n # INPUTS:\n # file - Path to any standard two-line element file.\n # catalog - Optional array of NORAD catalog numbers for the satellites of\n # interest. The default action is to display data from every\n # satellite in the file.\n \n # Brett Pantalone\n # mailto:[email protected]\n # http://research.ece.ncsu.edu/osl/\n if(catalog==np.zeros(1)):\n catalog = np.array([])\n \n try:\n fd = open(file)\n except(IOError):\n file = file[0:-3]+\".tle\"\n fd = open(file)\n \n assert(os.path.isfile('./'+file), \"File doesn''t exist in this directory.\")\n \n kiter = 0\n A0 = fd.readline().rstrip()\n A1 = fd.readline().rstrip()\n A2 = fd.readline().rstrip()\n oe = np.array([0, 0, 0, 0, 0, 0])\n epoch = np.array([0, 0])\n \n try:\n while(isinstance(A2,str)==1):\n kiter+=1\n satnum = np.array([float(A1[2:6])])\n if(catalog.size==0 or ismember(satnum,catalog)==1):\n if(kiter==1):\n print('-'*50)\n print('Satellite: %s' % A0)\n assert(chksum(A1), 'Checksum failure on line 1')\n assert(chksum(A2), 'Checksum failure on line 2')\n print(\"Catalog Number: %f\" % satnum)\n epochyear = np.array([float('20'+A1[18:20])])\n epochday = np.array([float(A1[20:31])])\n epoch = np.array([epochyear,epochday])\n print(\"Epoch time: %s\" % A1[18:31]) #YYDDD.DDDDDDDD\n inc = np.array([float(A2[8:15])])\n print(\"Inclination: %f deg\" % inc)\n raan = np.array([float(A2[17:24])])\n print(\"Right Ascension of the Ascending Node: %f deg\" % raan)\n ecc = np.array([float('.' + A2[26:32])])\n print(\"Eccentricity: %f\" % ecc)\n aop = np.array([float(A2[34:41])])\n print(\"Argument of perigee: %f deg\" % aop)\n M = np.array([float(A2[43:50])])\n print(\"Mean Anomaly: %f deg\" % M)\n n = np.array([float(A2[52:62])])\n print(\"Mean motion: %f rev/day\" % n)\n T = 86400/n;\n print(\"Period of rev: %.0f s/rev\" % T)\n a = (((T/(2*np.pi))**2)*3.986004e+14)**(1/3);\n print(\"Semimajor axis: %.0f meters\" % a)\n b = a*(1 - ecc**2)**(0.5)\n print(\"Semiminor axis: %.0f meters\" % b)\n oe = np.array([a, ecc, inc, raan, aop, M])\n elif(kiter>1):\n print('-'*50)\n print('Satellite: %s' % A0)\n assert(chksum(A1), 'Checksum failure on line 1')\n assert(chksum(A2), 'Checksum failure on line 2')\n print(\"Catalog Number: %f\" % satnum)\n epochyear = np.array([float('20'+A1[18:20])])\n epochday = np.array([float(A1[20:31])])\n epoch_new = np.array([epochyear,epochday])\n print(\"Epoch time: %s\" % A1[18:31]) #YYDDD.DDDDDDDD\n inc = np.array([float(A2[8:15])])\n print(\"Inclination: %f deg\" % inc)\n raan = np.array([float(A2[17:24])])\n print(\"Right Ascension of the Ascending Node: %f deg\" % raan)\n ecc = np.array([float('.' 
+ A2[26:32])])\n print(\"Eccentricity: %f\" % ecc)\n aop = np.array([float(A2[34:41])])\n print(\"Argument of perigee: %f deg\" % aop)\n M = np.array([float(A2[43:50])])\n print(\"Mean Anomaly: %f deg\" % M)\n n = np.array([float(A2[52:62])])\n print(\"Mean motion: %f rev/day\" % n)\n T = 86400/n;\n print(\"Period of rev: %.0f s/rev\" % T)\n a = (((T/(2*np.pi))**2)*3.986004e+14)**(1/3);\n print(\"Semimajor axis: %.0f meters\" % a)\n b = a*(1 - ecc**2)**(0.5)\n print(\"Semiminor axis: %.0f meters\" % b)\n oe_new = np.array([a, ecc, inc, raan, aop, M])\n oe = np.concatenate((oe,oe_new), axis=1)\n epoch = np.concatenate((epoch,epoch_new),axis=1)\n A0 = fd.readline().rstrip()\n A1 = fd.readline().rstrip()\n A2 = fd.readline().rstrip()\n except:\n fd.close()\n return oe, epoch", "def loadTextFiles(path):\n\n data = []\n \n for filename in os.listdir(path):\n f=open(path+filename, 'r')\n content = f.read()\n # clean special characters and append\n data.append(re.sub('\\W+',' ', content))\n\n return data", "def readText(self, filename, firstLine = 0, lastLine = None):\n \n assert filename.endswith('.txt')\n file = open(filename, 'r')\n self.samples = []\n\n li = 0\n while li < firstLine:\n if not file.readline():\n return\n li += 1\n\n while lastLine == None or li < lastLine:\n line = file.readline()\n if not line:\n return\n li += 1\n line = line.strip()\n if line:\n columns = line.split('|')\n if columns[1] == 'client-fps':\n self.samples.append(Sample(line, columns))", "def read_data(args):\n\n print(\"Start read_data\")\n t_tot = 0 # sum of times for the all dataset\n date_dirs = os.listdir(args.path_data_base)\n for n_iter, date_dir in enumerate(date_dirs):\n # get access to each sequence\n path1 = os.path.join(args.path_data_base, date_dir)\n if not os.path.isdir(path1):\n continue\n date_dirs2 = os.listdir(path1)\n\n for date_dir2 in date_dirs2:\n path2 = os.path.join(path1, date_dir2)\n if not os.path.isdir(path2):\n continue\n # read data\n oxts_files = sorted(glob.glob(os.path.join(path2, 'oxts', 'data', '*.txt')))\n oxts = KITTIDataset.load_oxts_packets_and_poses(oxts_files)\n\n \"\"\" Note on difference between ground truth and oxts solution:\n - orientation is the same\n - north and east axis are inverted\n - position are closed to but different\n => oxts solution is not loaded\n \"\"\"\n\n print(\"\\n Sequence name : \" + date_dir2)\n if len(oxts) < KITTIDataset.min_seq_dim: #  sequence shorter than 30 s are rejected\n cprint(\"Dataset is too short ({:.2f} s)\".format(len(oxts) / 100), 'yellow')\n continue\n lat_oxts = np.zeros(len(oxts))\n lon_oxts = np.zeros(len(oxts))\n alt_oxts = np.zeros(len(oxts))\n roll_oxts = np.zeros(len(oxts))\n pitch_oxts = np.zeros(len(oxts))\n yaw_oxts = np.zeros(len(oxts))\n roll_gt = np.zeros(len(oxts))\n pitch_gt = np.zeros(len(oxts))\n yaw_gt = np.zeros(len(oxts))\n t = KITTIDataset.load_timestamps(path2)\n acc = np.zeros((len(oxts), 3))\n acc_bis = np.zeros((len(oxts), 3))\n gyro = np.zeros((len(oxts), 3))\n gyro_bis = np.zeros((len(oxts), 3))\n p_gt = np.zeros((len(oxts), 3))\n v_gt = np.zeros((len(oxts), 3))\n v_rob_gt = np.zeros((len(oxts), 3))\n\n k_max = len(oxts)\n for k in range(k_max):\n oxts_k = oxts[k]\n t[k] = 3600 * t[k].hour + 60 * t[k].minute + t[k].second + t[\n k].microsecond / 1e6\n lat_oxts[k] = oxts_k[0].lat\n lon_oxts[k] = oxts_k[0].lon\n alt_oxts[k] = oxts_k[0].alt\n acc[k, 0] = oxts_k[0].af\n acc[k, 1] = oxts_k[0].al\n acc[k, 2] = oxts_k[0].au\n acc_bis[k, 0] = oxts_k[0].ax\n acc_bis[k, 1] = oxts_k[0].ay\n acc_bis[k, 2] = 
oxts_k[0].az\n gyro[k, 0] = oxts_k[0].wf\n gyro[k, 1] = oxts_k[0].wl\n gyro[k, 2] = oxts_k[0].wu\n gyro_bis[k, 0] = oxts_k[0].wx\n gyro_bis[k, 1] = oxts_k[0].wy\n gyro_bis[k, 2] = oxts_k[0].wz\n roll_oxts[k] = oxts_k[0].roll\n pitch_oxts[k] = oxts_k[0].pitch\n yaw_oxts[k] = oxts_k[0].yaw\n v_gt[k, 0] = oxts_k[0].ve\n v_gt[k, 1] = oxts_k[0].vn\n v_gt[k, 2] = oxts_k[0].vu\n v_rob_gt[k, 0] = oxts_k[0].vf\n v_rob_gt[k, 1] = oxts_k[0].vl\n v_rob_gt[k, 2] = oxts_k[0].vu\n p_gt[k] = oxts_k[1][:3, 3]\n Rot_gt_k = oxts_k[1][:3, :3]\n roll_gt[k], pitch_gt[k], yaw_gt[k] = IEKF.to_rpy(Rot_gt_k)\n\n t0 = t[0]\n t = np.array(t) - t[0]\n # some data can have gps out\n if np.max(t[:-1] - t[1:]) > 0.1:\n cprint(date_dir2 + \" has time problem\", 'yellow')\n ang_gt = np.zeros((roll_gt.shape[0], 3))\n ang_gt[:, 0] = roll_gt\n ang_gt[:, 1] = pitch_gt\n ang_gt[:, 2] = yaw_gt\n\n p_oxts = lla2ned(lat_oxts, lon_oxts, alt_oxts, lat_oxts[0], lon_oxts[0],\n alt_oxts[0], latlon_unit='deg', alt_unit='m', model='wgs84')\n p_oxts[:, [0, 1]] = p_oxts[:, [1, 0]] # see note\n\n # take correct imu measurements\n u = np.concatenate((gyro_bis, acc_bis), -1)\n # convert from numpy\n t = torch.from_numpy(t)\n p_gt = torch.from_numpy(p_gt)\n v_gt = torch.from_numpy(v_gt)\n ang_gt = torch.from_numpy(ang_gt)\n u = torch.from_numpy(u)\n\n # convert to float\n t = t.float()\n u = u.float()\n p_gt = p_gt.float()\n ang_gt = ang_gt.float()\n v_gt = v_gt.float()\n\n mondict = {\n 't': t, 'p_gt': p_gt, 'ang_gt': ang_gt, 'v_gt': v_gt,\n 'u': u, 'name': date_dir2, 't0': t0\n }\n\n t_tot += t[-1] - t[0]\n KITTIDataset.dump(mondict, args.path_data_save, date_dir2)\n print(\"\\n Total dataset duration : {:.2f} s\".format(t_tot))", "def read_data(path2file: str, yr_loc: int, ti_loc: int, txt_loc: int):\n lemmatizer = WordNetLemmatizer()\n data_per_yr = defaultdict(list)\n with open(path2file, 'r') as fin:\n lines = fin.readlines()[1:]\n for each_line in lines:\n each_line = each_line.replace('\\n', '').split(',')\n yr = int(each_line[yr_loc])\n full_txt = (each_line[ti_loc] + ' ' + each_line[txt_loc]).split()\n\n full_txt_out = []\n for word in full_txt:\n if word.endswith('ing') or word.endswith('ed'):\n word = lemmatizer.lemmatize(word, pos='v')\n else:\n word = lemmatizer.lemmatize(word)\n if len(word) > 1:\n full_txt_out.append(word)\n data_per_yr[yr].append(full_txt_out)\n\n return data_per_yr", "def read_in_LC_files(input_files, obj_names, style='SNANA'):\n LC_list = []\n if style == 'SNANA':\n for i, input_file in enumerate(input_files):\n t, f, filts, err = np.genfromtxt(input_file,\n usecols=(1, 4, 2, 5), skip_header=18,\n skip_footer=1, unpack=True, dtype=str)\n t = np.asarray(t, dtype=float)\n f = np.asarray(f, dtype=float)\n err = np.asarray(err, dtype=float)\n\n sn_name = obj_names[i]\n new_LC = LightCurve(sn_name, t, f, err, filts)\n LC_list.append(new_LC)\n else:\n raise ValueError('Sorry, you need to specify a data style.')\n return LC_list", "def computeLstFromDada(filename):\n d = dada.DadaReader(filename, n_int=0)\n\n telescope = d.header[\"TELESCOPE\"]\n if telescope in ('LEDA', 'LWAOVRO', 'LWA-OVRO', 'LEDAOVRO', 'LEDA512', 'LEDA-OVRO'):\n h3(\"Data appears to be from LWAOVRO\")\n site = ledafits_config.ovro\n elif telescope in ('LWA1', 'LWA-1', 'LWA-NM', 'LWANM', 'LEDA64', 'LEDA64-NM'):\n h3(\"Data appears to be from LWA1\")\n site = ledafits_config.lwa1\n\n dt_obj = datetime.strptime(d.header[\"UTC_START\"], \"%Y-%m-%d-%H:%M:%S\") \n tsamp = float(d.header[\"TSAMP\"]) * 1e-6 # Sampling time per channel, in microseconds 
\n navg = int(d.header[\"NAVG\"]) # Number of averages per integration \n int_tim = tsamp * navg # Integration time is tsamp * navg \n \n byte_offset = int(d.header[\"OBS_OFFSET\"]) \n bytes_per_avg = int(d.header[\"BYTES_PER_AVG\"]) \n num_int = byte_offset / bytes_per_avg \n time_offset = num_int * int_tim \n \n pat = '(\\d+)-(\\d+)-(\\d+)[-_](\\d\\d)[:h](\\d\\d)[:m](\\d\\d)$'\n\n match = re.search(pat, d.header[\"UTC_START\"])\n if match:\n # Convert re match to integers, apart from file extension\n #(y, m, d, hh, mm, ss) = [int(m) for m in match.groups()[:-1]]\n dt = dt_obj + timedelta(seconds=time_offset)\n site.date = dt\n lst = site.sidereal_time()\n date_str = \"%04d%02d%02d\"%(dt.year,dt.month,dt.day)\n time_str = \"%02d%02d%02d\"%(dt.hour,dt.minute,dt.second)\n lst_str = str(float(lst) / 2 / np.pi * 24)\n #print lst\n #print lst_str \n #lst = str(lst).split(\":\")\n #lst_str = \"%s%s%s\"%(lst[0], lst[1], lst[2].split(\".\")[0])\n \n printRed( \"UTC START: %s\"%d.header[\"UTC_START\"] )\n printRed( \"TIME OFFSET: %s\"%timedelta(seconds=time_offset))\n printRed( \"NEW START: (%s, %s)\"%(date_str, time_str) )\n \n return date_str, time_str, lst_str\n else:\n print filename\n raise Exception(\"DadaToSiderealError\")", "def read_iers_bulletin_a(fileID):\n # read contents from input file object\n file_contents = fileID.read().decode('utf8').splitlines()\n\n # parse header text to find time offsets\n # TT-TAI\n TT_TAI = 0\n # TAI-UTC\n TAI_UTC = 0\n # counts the number of lines in the header\n count = 0\n HEADER = False\n # Reading over header text\n while not HEADER:\n # file line at count\n l = file_contents[count]\n # check if line contains time offsets\n if re.search(r'TT\\s\\=\\sTAI',l):\n TT_TAI = np.float64(re.findall(r'(\\d+\\.\\d+)',l).pop())\n if re.search(r'TAI-UTC',l):\n TAI_UTC = np.float64(re.findall(r'=\\s(\\d+\\.\\d+)',l).pop())\n # find line to set HEADER flag to True\n HEADER = bool(re.search(r'COMBINED\\sEARTH\\sORIENTATION\\sPARAMETERS:',l))\n # add 1 to counter\n count += 1\n\n # convert variables to numpy arrays\n MJD = np.zeros((7))\n UT1_UTC = np.zeros((7))\n valid = 0\n # for each day in the week\n for i in range(7):\n try:\n # split numerical instances from data line\n line_contents = file_contents[count+i+4].split()\n # years are not always complete in the bulletin file\n # Modified Julian Day (days since 1858-11-17T00:00:00)\n MJD[i] = np.float64(line_contents[3])\n # difference between UT1 and UTC times\n UT1_UTC[i] = np.float64(line_contents[8])\n except (IndexError,ValueError):\n pass\n else:\n valid += 1\n\n # calculate components for delta time\n # TAI time is ahead of GPS by 19 seconds\n TAI_GPS = 19.0\n # calculate calendar dates from Modified Julian days\n Y,M,D,h,m,s = convert_julian(MJD[:valid]+2400000.5, format='tuple')\n # calculate GPS Time (seconds since 1980-01-06T00:00:00)\n # by converting the Modified Julian days (days since 1858-11-17T00:00:00)\n GPS_Time = convert_delta_time(MJD[:valid]*8.64e4, epoch1=_mjd_epoch,\n epoch2=_gps_epoch, scale=1.0) + TAI_UTC - TAI_GPS\n # number of leap seconds between GPS and UTC\n # this finds the daily correction for weeks with leap seconds\n GPS_UTC = count_leap_seconds(GPS_Time)\n # calculate delta time (TT - UT1) -->\n # (TT-TAI) + (TAI-GPS) + (GPS-UTC) - (UT1-UTC)\n DELTAT = TT_TAI + TAI_GPS + GPS_UTC - UT1_UTC[:valid]\n\n # return dates and delta times\n return (Y,M,D,DELTAT)", "def open_file(path):\r\n f = open(path, encoding='utf-8', errors='ignore')\r\n data = f.readlines()\r\n lst_with_data = 
[]\r\n for i in data:\r\n i = i.replace('\"', ' ').replace(\"\\t\", ' ').replace(\"\\n\", \" \").replace(\"'\", ' ').split(' ')\r\n lst_with_data.append(i)\r\n res_lst = [] \r\n for i in lst_with_data:\r\n append_lst = []\r\n for j in i:\r\n if j.isdigit() or j == \"-\":\r\n append_lst.append(j) \r\n if len(append_lst) != 0: \r\n res_lst.append(append_lst) \r\n res_lst = res_lst[1:]\r\n res = [] \r\n for i in res_lst:\r\n if len(i) != len(res_lst[0]):\r\n i = i[1:]\r\n res.append(i) \r\n else:\r\n res.append(i) \r\n ln = len(res[0])\r\n data_by_years = []\r\n for i in range(ln):\r\n data_y = []\r\n for j in res:\r\n data_y.append(j[i])\r\n data_by_years.append(data_y) \r\n dict_by_years = {}\r\n dict_with_total = file_with_total_inform(\"Total_Lviv.csv\")\r\n for i in data_by_years:\r\n dict_by_years[int(i[0])] = causes(i)\r\n dict_by_years[int(i[0])].update({\"Total\": dict_with_total[i[0]]})\r\n res_dict = {}\r\n res_dict[\"Lviv\"] = dict_by_years \r\n return res_dict", "def preprocess(series_path, data_file_list):\n\n data_files = data_file_list\n processed_essays = []\n\n # extract useful words from text files\n for file in data_files:\n file_path = os.path.join(series_path, file)\n with open(file_path, 'r') as f: sen = f.read()\n words = sentence_to_words(sen)\n processed_essays.append(words)\n\n return processed_essays", "def loadFromTSreduceFile(filename):\n\tprint \"Loading the tsreduce file: %s\"%filename\n\tultracam = False\n\tultraspec = False\n\tsaft = True\n\tinputFile = open(filename,'rt')\n\tfirstLine = inputFile.readline()\n\tif \"tsreduce\" not in firstLine:\n\t\tprint \"This is not a tsreduce file. (I couldn't find the string 'tsreduce' in the first line.)\"\n\t\treturn\n\t\t\n\ttelescopeName = 'Warwick One Metre'\n\ttelescope = findTelescope(telescopeName)\n\t\n\tfilenames = []\n\tmidTimes = []\n\tMJDs = []\n\tcounts = []\n\tskys = []\n\tsigmas = []\n\terrors = []\n\ttimeFlags = []\n\texposures = []\n\tFWHMs = []\n\tbetas = []\n\txs = []\n\tys = []\n\tnumApertures = -1\n\t\n\tfor line in inputFile:\n\t\tif line[0] == '#':\n\t\t\tif 'ReferenceTime' in line:\n\t\t\t\tparts = line.split()\n\t\t\t\treferenceDateString = parts[2]\n\t\t\t\treferenceTimeString = parts[3]\n\t\t\t\treferenceDateUTC = astropy.time.Time(referenceDateString + \" \" + referenceTimeString)\n\t\t\t\ttLocation = astropy.coordinates.EarthLocation.from_geodetic(telescope['longitude'], telescope['latitude'], telescope['altitude'])\n\t\t\t\treferenceDateUTC.location = tLocation\n\t\t\t\treferenceDate = referenceDateUTC\n\t\t\tif 'FramePattern' in line:\n\t\t\t\ttargetString = generalUtils.getBetweenChars(line, '^', '(')\n\t\telse:\n\t\t\tparts = line.strip().split()\n\t\t\tif numApertures == -1:\n\t\t\t\tnumApertures = (len(parts)-2) / 6\n\t\t\t\tprint \"number of apertures is: \", numApertures\n\t\t\t\n\t\n\tfor a in range(numApertures):\n\t\tprint \"Aperture: \", a\n\t\tcounts = []\n\t\tskys = []\n\t\tsigmas = []\n\t\terrors = []\n\t\ttimeFlags = []\n\t\texposures = []\n\t\tFWHMs = []\n\t\tbetas = []\n\t\txs = []\n\t\tys = []\n\t\tinputFile.seek(0)\n\t\tindex = 0\n\t\tfor line in inputFile:\n\t\t\tif line[0] == '#':\n\t\t\t\tcontinue\t\n\t\t\tparts = line.strip().split()\n\t\t\tif numApertures == -1:\n\t\t\t\tnumApertures = (len(parts)-2) / 6\n\t\t\t\tprint \"number of apertures is: \", numApertures\n\t\t\t\n\t\t\tif a==0:\n\t\t\t\tfilenames.append(parts[0])\n\t\t\t\tmidTimes.append(float(parts[1]))\n\t\t\t\tincrement = astropy.time.TimeDelta(midTimes[index], format='sec')\n\t\t\t\tastroDate = 
referenceDate + increment\n\t\t\t\tMJDs.append(astroDate.mjd)\n\t\t\t\t\n\t\t\t\n\t\t\tcolumnOffset = a * 6\n\t\t\t\n\t\t\tcounts.append(float(parts[2 + columnOffset]))\n\t\t\texposures.append(1.0)\n\t\t\tsigmas.append(float(parts[3 + columnOffset]))\n\t\t\tskys.append(float(parts[4 + columnOffset]))\n\t\t\txs.append(float(parts[5 + columnOffset]))\n\t\t\tys.append(float(parts[6 + columnOffset]))\n\t\t\tFWHMs.append(float(parts[7 + columnOffset]))\n\t\t\t\n\t\t\tprint \"%s %5.7f Counts/s: %.2f[%.2f] (%.2f, %.2f) {%.2f}\"%(filenames[index], MJDs[index], counts[-1], sigmas[-1], xs[-1], ys[-1], FWHMs[-1])\n\t\t\t\n\t\t\t\n\t\t\tindex+=1\n\t\t\n\t\t\t\t\n\t\tphotometry = {}\n\t\t\n\t\tphotometry['MJD'] = numpy.array(MJDs)\n\t\tphotometry['exposure'] = numpy.array(exposures)\n\t\tphotometry['FWHM'] = numpy.array(FWHMs)\n\t\tphotometry['x'] = numpy.array(xs)\n\t\tphotometry['y'] = numpy.array(ys)\n\t\tphotometry['counts'] = numpy.array(counts)\n\t\tphotometry['sigma'] = numpy.array(sigmas)\n\t\tphotometry['sky'] = numpy.array(skys)\n\t\t\n\t\tid = slots.getNextSlotID()\n\t\tprint \"new ID:\", id\n\t\tslot = photometryClasses.slotObject(id)\n\t\tslot.setPhotometry(photometry)\n\t\tslot.setTimeColumn('MJD')\n\t\tslot.setYColumn('counts')\n\t\tslot.setYError('sigma')\n\t\tslot.target = targetString\n\t\tslot.filter = 'Rise-1'\n\t\tslot.telescope = telescope\n\t\tslot.aperture = a\n\t\tnumSlots = slots.addSlot(slot)\n\t\t\n\t\tprint referenceDate, referenceDate.scale\n\t\tprint referenceDateUTC, referenceDateUTC.scale\n\t\ttLocation = astropy.coordinates.EarthLocation.from_geodetic(telescope['longitude'], telescope['latitude'], telescope['altitude'])\n\t\treferenceDate.location = tLocation\n\t\tprint referenceDate, referenceDate.scale, referenceDate.location\n\t\tprint referenceDate.tdb", "def spectre_tsv3(f):\n \n skip = 0\n while True:\n try: \n wav, flux, dflux = np.loadtxt(f, skiprows = skip, unpack = True)\n \n except ValueError:\n # Si les première lignes ont un en-tête\n skip += 1\n \n else:\n break\n \n return wav,flux", "def legacy_load(self,filepath= '', amplifier = 'Amplifier'):\n if filepath == '':\n filepath = filedialog.askopenfilename()\n\n file1 = open(filepath)\n ctxt = file1.readline().rstrip()\n\n header = ''\n rowskip = 0\n while ctxt[0] == '#':\n header = header + ctxt[1:]\n ctxt = file1.readline().rstrip()\n rowskip += 1\n voltstr = header[2:-1]\n file1.close()\n data1 = np.loadtxt(filepath, skiprows=rowskip)\n angles1 = data1[0, :]\n volts1 = data1[1, :]\n self.angle = angles1\n self.hydoutput= volts1\n self.amplifier = amplifier", "def p_and_l_from(files):\n if isinstance(files, str):\n files = [files]\n paths = []\n labels = []\n for file in files:\n print(f'read {file}')\n with open(file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.split(' ')\n paths.append(line[0])\n labels.append(int(line[1]))\n return [paths, labels]", "def read_data_samples(fp):\n if(path.isdir(fp)):\n fps = glob.glob(fp + '\\\\*.txt')\n return list(map(lambda x: read_file(x), fps))", "def load_data(path):\n fnames = os.listdir(path)\n fnames = sorted(fnames)\n print(fnames)\n x = []\n y = []\n dates = []\n for f in fnames:\n label = f.split('.')[0].split('-')[-1]\n\n # {'AD': 0, 'CN': 1, 'MCI': 2}\n if label == 'AD':\n label = 0\n elif label == 'CN':\n label = 1\n else:\n label = 2\n\n img = load_img(os.path.join(path, f))\n img = img_to_array(img)\n x.append(img)\n y.append(label)\n date = f.split('_')[4]\n date = datetime.datetime(int(date[:4]), int(date[4:]), 1)\n 
dates.append(date)\n\n return x, y, dates", "def examples_from_file(path):\n examples = []\n\n # count total lines before loading\n total_lines = int(local('wc -l {}'.format(path), capture=True).split()[0])\n\n with codecs.open(path, 'r', encoding='utf-8') as f:\n for line in verboserate(f, desc='Reading data file.', total=total_lines):\n src, trg = line.strip().lower().split('\\t')\n src_words = src.split(' ')\n trg_words = trg.split(' ')\n assert len(src_words) > 0\n assert len(trg_words) > 0\n\n if use_diff:\n ex = EditExample.salient_diff(src_words, trg_words, free_set)\n else:\n ex = EditExample.whitelist_blacklist(src_words, trg_words)\n examples.append(ex)\n return examples", "def read_fit_results_axel(path):\n\n data_files = glob.glob(os.path.join(path, \"*.npy\"))\n\n tl = TimeLine(numcomp=1, function='expflare')\n if not len(data_files):\n raise ValueError(\"No data files found!\")\n\n for i, result_file in enumerate(data_files):\n r = np.load(result_file, allow_pickle=True).flat[0]\n logging.info(\"Reading file {0:s} of data type {1:n}\".format(result_file, r['type'][0]))\n\n r['integral'] = np.zeros_like(r['rise'])\n # add the integral\n for j in range(r['rise'].size):\n r['integral'][j] = tl.integral(0., 100.,\n tstep=1000,\n t0_000=r['peak'][j] * 1e6, # s to micro s\n tr_000=r['rise'][j] * 1e6, # s to micro s\n td_000=r['decay'][j] * 1e6, # s to micro s\n A_000=-r['ampli'][j],\n c=0.) # integral in (micro s) * V\n if not np.isfinite(r['integral'][j]):\n r['integral'][j] = 1e20\n if not i:\n result = r\n else:\n for k, v in result.items():\n result[k] = np.append(v, r[k])\n\n return result", "def Read_t_file(file_name):\n t=[]\n \n with open(file_name,'r') as reader:\n temp=reader.readline().strip().split()[-1].split('-')\n t.append(temp[0])\n t.append(temp[1])\n for line in reader.readlines():\n t.append(line.strip().split()[-1].split('-')[-1])\n \n return np.array(t,dtype=np.float32)", "def lammpslog(filename):\r\n\r\n with open(filename, 'r') as f:\r\n data = f.readlines()\r\n\r\n #----get how many sections are there----\r\n start = [i for i, val in enumerate(data) if val.startswith('Step ')]\r\n end = [i for i, val in enumerate(data) if val.startswith('Loop time of ')]\r\n\r\n if data[-1] is not '\\n':\r\n if data[-1].split()[0].isnumeric(): #incomplete log file\r\n end.append(len(data) - 2)\r\n \r\n start = np.array(start)\r\n end = np.array(end)\r\n linenum = end - start - 1\r\n print ('Section Number: %d' %len(linenum), ' Line Numbers: ' + str(linenum))\r\n del data \r\n\r\n final = []\r\n for i in range(len(linenum)):\r\n data = pd.read_csv(filename, sep = '\\s+', skiprows = start[i], nrows = linenum[i])\r\n final.append(data)\r\n del data\r\n\r\n return final", "def spectre_tsv(f):\n \n skip = 0\n while True:\n try: \n wav, flux = np.loadtxt(f, skiprows = skip, unpack = True)\n \n except ValueError:\n # Si les première lignes ont un en-tête\n skip += 1\n \n else:\n break\n \n return wav,flux", "def main(folder):\n if P(folder).is_file():\n folder = P(folder).parent\n fs = [ii for ii in P(folder).iterdir() if ii.suffix == '.txt']\n for i, f in enumerate(fs):\n data = np.loadtxt(f, delimiter=', ')\n y = data[:, 1] - trapz(data[:, 1], data[:, 0]) \n spins = trapz(cumtrapz(data[:, 1], data[:, 0]), data[1:, 0])\n print(f\"{f.stem:<50} count: {spins:.2e}\")", "def read_lasefile(filename, encoding=\"cp1252\"):\n path = Path(filename)\n if not path.suffix == \".lase\":\n path = path.with_suffix(\".lase\")\n file = open(str(path), encoding=encoding).read()\n lines = [i for i 
in re.split(\"[\\n\\r]\", file) if i]\n\n data = {}\n for l in lines:\n if re.match(r\"^\\[.*\\]$\", l):\n section = l.replace(\"[\", \"\").replace(\"]\", \"\")\n data[section] = {}\n else:\n var, value = re.split(\"=\", l, maxsplit=1)\n data[section][var] = value\n\n df = get_scandata(data[\"Scans\"])\n return df", "def read_traveltime(self):\r\n \r\n #### read Travel time from txt file\r\n \r\n \r\n #### Particle travel time branch 1\r\n excelfile_surface_branch1_high = r'excel\\flow_rate\\particle_surface_branch1_high.xlsx'\r\n inarray_surface_branch1_high = pd.read_excel(excelfile_surface_branch1_high).to_numpy() \r\n \r\n excelfile_surface_branch1_medium = r'excel\\flow_rate\\particle_surface_branch1_medium.xlsx'\r\n inarray_surface_branch1_medium = pd.read_excel(excelfile_surface_branch1_medium).to_numpy() \r\n \r\n excelfile_surface_branch1_low = r'excel\\flow_rate\\particle_surface_branch1_low.xlsx'\r\n inarray_surface_branch1_low = pd.read_excel(excelfile_surface_branch1_low).to_numpy()\r\n \r\n excelfile_bottom_branch1_high = r'excel\\flow_rate\\particle_bottom_branch1_high.xlsx'\r\n inarray_bottom_branch1_high = pd.read_excel(excelfile_bottom_branch1_high).to_numpy()\r\n \r\n excelfile_bottom_branch1_medium = r'excel\\flow_rate\\particle_bottom_branch1_medium.xlsx'\r\n inarray_bottom_branch1_medium = pd.read_excel(excelfile_bottom_branch1_medium).to_numpy()\r\n \r\n excelfile_bottom_branch1_low = r'excel\\flow_rate\\particle_bottom_branch1_low.xlsx'\r\n inarray_bottom_branch1_low = pd.read_excel(excelfile_bottom_branch1_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 1\r\n excelfile_tracer_branch1_high = r'excel\\flow_rate\\tracer_branch1_high.xlsx'\r\n inarray_tracer_branch1_high = pd.read_excel(excelfile_tracer_branch1_high).to_numpy()\r\n \r\n excelfile_tracer_branch1_medium = r'excel\\flow_rate\\tracer_branch1_medium.xlsx'\r\n inarray_tracer_branch1_medium = pd.read_excel(excelfile_tracer_branch1_medium).to_numpy()\r\n \r\n excelfile_tracer_branch1_low = r'excel\\flow_rate\\tracer_branch1_low.xlsx'\r\n inarray_tracer_branch1_low = pd.read_excel(excelfile_tracer_branch1_low).to_numpy()\r\n \r\n self.inarrays_branch1 = [inarray_surface_branch1_high, inarray_surface_branch1_medium, inarray_surface_branch1_low, \\\r\n inarray_bottom_branch1_high, inarray_bottom_branch1_medium, inarray_bottom_branch1_low, \\\r\n inarray_tracer_branch1_high, inarray_tracer_branch1_medium, inarray_tracer_branch1_low]\r\n \r\n \r\n #### Particle travel time branch 5\r\n excelfile_surface_branch5_high = r'excel\\flow_rate\\particle_surface_branch5_high.xlsx'\r\n inarray_surface_branch5_high = pd.read_excel(excelfile_surface_branch5_high).to_numpy()\r\n \r\n excelfile_surface_branch5_medium = r'excel\\flow_rate\\particle_surface_branch5_medium.xlsx'\r\n inarray_surface_branch5_medium = pd.read_excel(excelfile_surface_branch5_medium).to_numpy()\r\n \r\n excelfile_surface_branch5_low = r'excel\\flow_rate\\particle_surface_branch5_low.xlsx'\r\n inarray_surface_branch5_low = pd.read_excel(excelfile_surface_branch5_low).to_numpy()\r\n \r\n excelfile_bottom_branch5_high = r'excel\\flow_rate\\particle_bottom_branch5_high.xlsx'\r\n inarray_bottom_branch5_high = pd.read_excel(excelfile_bottom_branch5_high).to_numpy()\r\n \r\n excelfile_bottom_branch5_medium = r'excel\\flow_rate\\particle_bottom_branch5_medium.xlsx'\r\n inarray_bottom_branch5_medium = pd.read_excel(excelfile_bottom_branch5_medium).to_numpy()\r\n \r\n excelfile_bottom_branch5_low = 
r'excel\\flow_rate\\particle_bottom_branch5_low.xlsx'\r\n inarray_bottom_branch5_low = pd.read_excel(excelfile_bottom_branch5_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 5\r\n excelfile_tracer_branch5_high = r'excel\\flow_rate\\tracer_branch5_high.xlsx'\r\n inarray_tracer_branch5_high = pd.read_excel(excelfile_tracer_branch5_high).to_numpy()\r\n \r\n excelfile_tracer_branch5_medium = r'excel\\flow_rate\\tracer_branch5_medium.xlsx'\r\n inarray_tracer_branch5_medium = pd.read_excel(excelfile_tracer_branch5_medium).to_numpy()\r\n \r\n excelfile_tracer_branch5_low = r'excel\\flow_rate\\tracer_branch5_low.xlsx'\r\n inarray_tracer_branch5_low = pd.read_excel(excelfile_tracer_branch5_low).to_numpy()\r\n \r\n \r\n self.inarrays_branch5 = [inarray_surface_branch5_high, inarray_surface_branch5_medium, inarray_surface_branch5_low, \\\r\n inarray_bottom_branch5_high, inarray_bottom_branch5_medium, inarray_bottom_branch5_low, \\\r\n inarray_tracer_branch5_high, inarray_tracer_branch5_medium, inarray_tracer_branch5_low]", "def read_data(path, low=275000, high=300000):\n data = read_pickle(path)\n\n Y1 = data['lowT_av'].squeeze()\n Y2 = data['upT_av'].squeeze()\n # LWC = data['lwc1V_av']\n X = np.arange(Y1.shape[0]) / 100.\n # X = data['time_av'].squeeze()\n if low is not None and high is not None:\n X = X[low:high]\n Y1 = Y1[low:high]\n Y2 = Y2[low:high]\n\n return X, Y1, Y2", "def read(path, label2int):\n\n labels = [] # int labels\n samples = [] # examples as strings\n\n for label_dir in os.listdir(path):\n label_dir_path = os.path.join(path, label_dir)\n\n for file in os.listdir(label_dir_path):\n file_path = os.path.join(label_dir_path, file)\n file_text = open(file_path).read().rstrip()\n int_label = label2int[label_dir.lower()]\n samples.append(file_text)\n labels.append(int_label)\n\n return samples, labels", "def loadEpisodes(fichier):\n \n # Load the file\n with open(fichier, 'r') as f:\n episodes = []\n for episode in f.readlines(): # Read lines one by one\n episode = np.array([p.split(':') # Remove the last ';' and split':'\n for p in episode[:-2].split(';')], float)\n episode = np.array(episode, int)\n episode = episode[episode[:,1].argsort()] # Sort the array in order of the \n episodes.append(episode) # infection time \n \n return episodes", "def legacy_load(self,filepath = '', amplifier = 'Amplifier'):\n if filepath == '':\n filepath = filedialog.askopenfilename()\n file1 = open(filepath)\n ctxt = file1.readline().rstrip()\n\n header = ''\n rowskip = 0\n while ctxt[0] == '#':\n header = header + ctxt[1:]\n ctxt = file1.readline().rstrip()\n rowskip += 1\n\n voltstr = header[2:-1]\n\n if voltstr.find(',') > 0:\n volts = np.fromstring(voltstr, sep=',')\n else:\n volts = np.fromstring(voltstr, sep='\\t')\n\n file1.close()\n data1 = np.loadtxt(filepath, skiprows=rowskip)\n self.hydoutput = data1\n self.cfreq = volts\n self.amplifier = amplifier", "def loadtext(infile):\n warrsn, farrsn =np.loadtxt(infile, usecols=(0, 1), unpack=True)\n return create_spectrum(warrsn, farrsn)", "def t1_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = self.filenameparser(filename)\n self.t1_filename = filename", "def loadData(name):\n inputs = []\n outputs = []\n with open(name) as file:\n data = file.readlines()[2:]\n lines = map(str.split, data)\n for line in lines:\n inputs.append(preparePatterns(line[:-1]))\n outputs.append(float(line[-1]))\n length = len(inputs[0])\n return inputs, outputs, length", "def ReadTinker():\n # Total Potential 
Energy : {f} Kcal/mole\n total_line = \" Total Potential Energy :\"\n with open('LICHM_TINKEREnergy_0.log') as f:\n for line in f:\n if line.startswith(total_line):\n # print(line)\n TinkE = re.findall(r'\\-\\d+\\.*\\d*', line)\n TinkE = float(TinkE[0])\n # if AMOEBA == True:\n # if line.startswith(\"Polarization\"):\n # Epol = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Epol = float(Epol[0])\n # elif line.startswith(\"Implicit Solvation\")\n # Esolv = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Esolv = float(Esolv[0])\n f.close()\n # if AMOEBA == True:\n # TINKERPolForces = EPol + ESolv\n # TinkE += TINKERPolForces\n #\n TinkE *= kcal2ev\n return TinkE", "def t6_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"SubSpectrumData/\" + self.filenameparser(filename)\n self.t6_filename = filename", "def t3_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"SubSpectrumData/\" + self.filenameparser(filename)\n self.t3_filename = filename", "def read_filename(self, filename):\r\n self.text_lines = task3.read_text_file(filename)", "def read_calibration_file(\n filename, return_integration_time=False,\n create_spectrum=True\n):\n assert isinstance(filename, str)\n area_texts = {\n 'Collection-area(cm^2)': ('area', 'cm**2'),\n 'Fiber(micron)': ('diameter', 'micrometer'),\n 'Fiber(cm)': ('diameter', 'cm'),\n 'Collection-area(um^2)': ('area', 'micrometer**2')\n }\n integration_time_texts = {\n 'Int.Time(usec)': 'microsecond',\n 'IntegrationTime(sec)': 'second',\n 'Int.Time(sec)': 'second',\n 'IntegrationTime(usec)': 'microsecond',\n }\n\n area = None\n integration_time = None\n cal_data = np.loadtxt(filename, skiprows=9)\n\n with open(filename, 'r') as f:\n # just check the first nine lines\n for n in range(9):\n line = next(f)\n # removes all spaces\n line = line.replace(' ', '').replace(':', '')\n if area is None:\n for area_text, area_type in area_texts.items():\n if area_text in line:\n area = float(line.replace(area_text, ''))\n break\n\n if integration_time is None:\n for it_text, it_units in integration_time_texts.items():\n if it_text in line:\n integration_time = float(line.replace(it_text, ''))\n break\n\n if (area is None) or (integration_time is None):\n raise DreyeError(\"Could not find area or \"\n \"integration time in lamp file.\")\n\n if area_type[0] == 'diameter':\n area = area * ureg(area_type[1])\n area = np.pi * (area/2) ** 2\n area = area.to('cm**2')\n elif area_type[0] == 'area':\n area = (area * ureg(area_type[1])).to('cm**2')\n else:\n raise DreyeError(\"Area type {area_type} not recognized.\")\n\n integration_time = (integration_time * ureg(it_units)).to('s')\n\n if create_spectrum:\n cal = CalibrationSpectrum(\n values=cal_data[:, 1],\n domain=cal_data[:, 0],\n area=area\n )\n if return_integration_time:\n return cal, integration_time\n else:\n return cal\n elif return_integration_time:\n return cal_data[:, 0], cal_data[:, 1], area, integration_time\n\n return cal_data[:, 0], cal_data[:, 1], area", "def load(self, path):\n tube_files = [f for f in os.listdir(path) if f.split('.')[-1] == \"tube\"]\n\n for fl in tube_files:\n id = fl.split('.')[0]\n with open(path + fl, 'r') as f:\n for i, line in enumerate(f):\n args = line.strip().split(',')\n dt = det.Detection(args[1], int(float(args[2])), int(float(args[3])), int(float(args[4])), int(float(args[5])), int(float(args[0])), bool(args[6]))\n if i == 0:\n tube = Tube(dt, id)\n else:\n tube.detection_list.append(dt)\n\n 
self.active_tube_list.append(tube)", "def read_fatlasa_results(filename):\n\n pass", "def legacy_load(self,filepath = '', amplifier = 'Amplifier'):\n\n if filepath == '':\n filepath = filedialog.askopenfilename()\n\n file1 = open(filepath)\n ctxt = file1.readline().rstrip()\n\n header = ''\n rowskip = 0\n while ctxt[0] == '#':\n header = header + ctxt[1:]\n ctxt = file1.readline().rstrip()\n rowskip += 1\n\n voltstr = header[2:-1]\n\n if voltstr.find(',') > 0:\n volts = np.fromstring(voltstr, sep=',')\n else:\n volts = np.fromstring(voltstr, sep='\\t')\n\n file1.close()\n data1 = np.loadtxt(filepath, skiprows=rowskip)\n self.hydoutput = data1\n self.voltage = volts\n self.amplifier = amplifier", "def read_text(filename):\n with open(filename, 'r') as f:\n com = f.readline()[0]\n wavelength, flux = np.loadtxt(filename, unpack=True,\n usecols=(0, 1), comments=com)\n return wavelength, flux", "def read_data(self):\n self.days = [0, 2, 3, 5, 6, 8, 9, 11, 13, 14]\n path = '../data/'\n data = []\n for day in self.days:\n filename = path + 'spectrum_day{}.txt'.format(day)\n data.append(read_file(filename))\n return data", "def get_vels(traj_num_str, num_samples):\n for_vel = []\n ang_vel = []\n # Get oxts data\n oxts_dir = \"./dataset/\" + traj_num_str.zfill(2) + \"/data/\"\n for i in range(num_samples):\n oxts_file_path = oxts_dir + str(i).zfill(10) + \".txt\"\n fid = open(oxts_file_path) \n line = fid.readlines()\n oxts_data = [float(s) for s in line[0].split(\" \")]\n for_vel.append(oxts_data[8])\n ang_vel.append(oxts_data[19])\n return for_vel, ang_vel", "def get_airglow_spectra(self):\n\n self.AIRGLOW_DIR = os.getcwd()+'/AirglowSpectra/cosby/'\n AF = glob.glob(self.AIRGLOW_DIR+'/*.txt')\n AL = []\n for file in AF:\n data = pd.read_csv(file, delim_whitespace=True)\n d = data.to_records(index=False)\n AL.append(np.array(d))\n self.AirglowLines = np.hstack(AL)", "def read_trace(path):\n suffix = path[-3:]\n if suffix == 'txt':\n return read_csv(path)\n elif suffix == 'trc':\n return read_trc(path)\n else:\n raise ValueError(\"Invalid file extension: %s\" % suffix)", "def soho_ephin_loader(startdate, enddate, resample=None, path=None, all_columns=False, pos_timestamp=None, use_uncorrected_data_on_own_risk=False):\n\n if not path:\n path = sunpy.config.get('downloads', 'download_dir') + os.sep\n\n # create list of files to load:\n dates = pd.date_range(start=startdate, end=enddate, freq='D')\n filelist = []\n for i, doy in enumerate(dates.day_of_year):\n if dates[i].year<2000:\n pre = \"eph\"\n yy = dates[i].year-1900\n else:\n pre = \"epi\"\n yy = dates[i].year-2000\n name = \"%s%02d%03d\" %(pre, yy, doy)\n try:\n file = glob.glob(f\"{path}{os.sep}{name}.rl2\")[0]\n except IndexError:\n print(f\"File {name}.rl2 not found locally at {path}.\")\n file = soho_ephin_download(dates[i], path)\n if len(file) > 0:\n filelist.append(file)\n if len(filelist) > 0:\n filelist = np.sort(filelist)\n\n col_names = ['Year', 'DOY', 'MS', 'S/C Epoch', 'Status Word part 1', 'Status Word part 2',\n 'E150', 'E300', 'E1300', 'E3000', 'P4', 'P8', 'P25', 'P41',\n 'H4', 'H8', 'H25', 'H41', 'INT',\n 'P4 GM', 'P4 GR', 'P4 S', 'P8 GM', 'P8 GR', 'P8 S',\n 'P25 GM', 'P25 GR', 'P25 S', 'P41 GM', 'P41 GR', 'P41 S',\n 'H4 GM', 'H4 GR', 'H4 S1', 'H4 S23', 'H8 GM', 'H8 GR', 'H8 S1', 'H8 S23',\n 'H25 GM', 'H25 GR', 'H25 S1', 'H25 S23', 'H41 GM', 'H41 GR', 'H41 S1', 'H41 S23',\n 'Status Flag', 'Spare 1', 'Spare 2', 'Spare 3']\n\n # read files into Pandas dataframes:\n df = pd.read_csv(filelist[0], header=None, sep=r'\\s+', 
names=col_names)\n if len(filelist) > 1:\n for file in filelist[1:]:\n t_df = pd.read_csv(file, header=None, sep=r'\\s+', names=col_names)\n df = pd.concat([df, t_df])\n\n # # generate datetime index from year, day of year, and milisec of day:\n df.index = doy2dt(df.Year.values, df.DOY.values + df.MS.values/1000./86400.)\n df.index.name = 'time'\n\n # drop some unused columns:\n if not all_columns:\n df = df.drop(columns=['Year', 'DOY', 'MS', 'S/C Epoch',\n 'Status Word part 1', 'Status Word part 2',\n 'P4 GM', 'P4 GR', 'P4 S',\n 'P8 GM', 'P8 GR', 'P8 S',\n 'P25 GM', 'P25 GR', 'P25 S',\n 'P41 GM', 'P41 GR', 'P41 S',\n 'H4 GM', 'H4 GR', 'H4 S1', 'H4 S23',\n 'H8 GM', 'H8 GR', 'H8 S1', 'H8 S23',\n 'H25 GM', 'H25 GR', 'H25 S1', 'H25 S23',\n 'H41 GM', 'H41 GR', 'H41 S1', 'H41 S23',\n 'Spare 1', 'Spare 2', 'Spare 3'])\n\n # Proton and helium measurements need to be corrected for effects determined post-launch,\n # cf. chapter 2.3 of https://www.ieap.uni-kiel.de/et/ag-heber/costep/materials/L2_spec_ephin.pdf\n # Until this correction has been implemented here, these data products are set to -9e9.\n # Setting use_uncorrected_data_on_own_risk=True skips this replacement, so that the uncorrected\n # data can be obtained at own risk!\n if use_uncorrected_data_on_own_risk:\n warnings.warn(\"Proton and helium data is still uncorrected! Know what you're doing and use at own risk!\")\n else:\n df.P4 = -9e9\n df.P8 = -9e9\n df.P25 = -9e9\n df.P41 = -9e9\n df.H4 = -9e9\n df.H8 = -9e9\n df.H25 = -9e9\n df.H41 = -9e9\n\n # replace bad data with np.nan:\n # there shouldn't be bad data in rl2 files!\n # df = df.replace(-9999.900, np.nan)\n\n # derive instrument status and dependencies\n status = df['Status Flag'].values\n\n fmodes = np.zeros(len(status))\n for q in range(len(status)):\n binaries = '{0:08b}'.format(int(status[q]))\n if int(binaries[-1]) == 1:\n if int(binaries[-3]) == 1:\n fmodes[q] = 1\n else:\n fmodes[q] = 2\n\n ringoff = np.zeros(len(status))\n for q in range(len(status)):\n binaries = '{0:08b}'.format(int(status[q]))\n if int(binaries[-2]):\n ringoff[q] = 1\n\n cs_e300 = '0.67 - 3.0 MeV'\n cs_e1300 = '2.64 - 6.18 MeV'\n cs_p25 = '25 - 41 MeV'\n cs_he25 = '25 - 41 MeV/N'\n if max(fmodes)==1:\n cs_e1300 = \"2.64 - 10.4 MeV\"\n cs_p25 = '25 - 53 MeV'\n cs_he25 = '25 - 53 MeV/n'\n if max(fmodes)==2:\n warnings.warn('Careful: EPHIN ring off!')\n\n # failure mode D since 4 Oct 2017:\n # dates[-1].date() is enddate, used to catch cases when enddate is a string\n if dates[-1].date() >= dt.date(2017, 10, 4):\n cs_e300 = 'deactivated bc. 
of failure mode D'\n cs_e1300 = \"0.67 - 10.4 MeV\"\n # dates[0].date() is startdate, used to catch cases when startdate is a string\n if dates[0].date() <= dt.date(2017, 10, 4):\n warnings.warn('EPHIN instrument status (i.e., electron energy channels) changed during selected period (on Oct 4, 2017)!')\n\n # careful!\n # adjusting the position of the timestamp manually.\n # requires knowledge of the original time resolution and timestamp position!\n if pos_timestamp == 'center':\n df.index = df.index+pd.Timedelta('30s')\n\n # optional resampling:\n if isinstance(resample, str):\n df = resample_df(df, resample, pos_timestamp=pos_timestamp)\n else:\n df = []\n\n meta = {'E150': '0.25 - 0.7 MeV',\n 'E300': cs_e300,\n 'E1300': cs_e1300,\n 'E3000': '4.80 - 10.4 MeV',\n 'P4': '4.3 - 7.8 MeV',\n 'P8': '7.8 - 25 MeV',\n 'P25': cs_p25,\n 'P41': '41 - 53 MeV',\n 'H4': '4.3 - 7.8 MeV/n',\n 'H8': '7.8 - 25.0 MeV/n',\n 'H25': cs_he25,\n 'H41': '40.9 - 53.0 MeV/n',\n 'INT': '>25 MeV integral'}\n\n return df, meta", "def loadtcdat(filename= None):\n\n import numpy as np\n from StringIO import StringIO\n import Tkinter\n from tkFileDialog import askopenfilename\n from matplotlib.pyplot import figure,subplot,plot,xlabel,ylabel,title,legend\n\n if filename is not None:\n print \"Opening %s\\n\" %(filename)\n else:\n root = Tkinter.Tk()\n root.withdraw()\n filename = askopenfilename(parent=root, title='Open File',\n filetypes=[('csv files', '*.csv'),\n ('txt files', '*.txt')])\n root.destroy()\n root.mainloop()\n\n if filename is not None:\n f=open(filename)\n names = f.readline()\n names = names.strip('\\r\\n')\n names = names.split(\",\")\n f.close()\n\n data = np.genfromtxt(filename, delimiter=',',\n unpack=True, skip_header=2)\n time = data[0]\n\n figure()\n subplot(211)\n plot(time, data[1], label='Feed bin')\n plot(time, data[2], label='Part bin')\n ylabel(r'$ T_{bin} \\left(K\\right) $')\n legend(loc='best')\n\n subplot(212)\n plot(time,data[4],label='Feed bin heater')\n plot(time,data[5],label='Part bin heater')\n xlabel(r'$ Time \\left(s\\right) $')\n ylabel(r'$ P_{heater} \\left( \\frac{W}{m^2} \\right) $')\n legend(loc='best')\n\n return (data, time, names)", "def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")", "def load_data(file_path: str) -> []:\r\n allowed_files = [\".log\"] # list of allowed file extensions\r\n\r\n file_path = file_path.lower()\r\n\r\n # if type is allowed\r\n if file_path[-4:] in allowed_files:\r\n\r\n # try to open the file\r\n try:\r\n with open(file_path, \"r\") as fl:\r\n return [line.rstrip('\\n') for line in fl] # getting a list of log lines\r\n\r\n # fail\r\n except FileNotFoundError:\r\n print(\"Couldn't find a file to read from.\")\r\n except PermissionError:\r\n print('File reading permission denied.')\r\n else:\r\n print(\"Wrong file type.\")\r\n\r\n return None", "def parse_file(filepath):\n\n #number_pattern = '(\\d+(?:\\.\\d+)?)'\n #number_pattern = '(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?'\n #line_pattern = '^\\s*%s\\.*hr.*min.*$' % ('\\s+'.join([number_pattern for x in range(5)]))\n\n line_pattern = 
r'^\\s*(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+.*hr.*min.*$'\n\n data = [] # create an empty list to collect the data\n # open the file and read through it line by line\n with open(filepath, 'r') as file_object:\n line = file_object.readline()\n while line:\n #print(\"line: \", line)\n match = re.match(line_pattern, line)\n if match:\n #print(\"match line: \", line)\n #print(match.groups())\n row = {\n 'l_rate': match.group(1),\n 'iter': match.group(2),\n 'epoch': match.group(3),\n 'num': match.group(4),\n 'valid_loss': match.group(5),\n 'valid_acc': match.group(6),\n 'train_loss': match.group(7),\n 'train_acc': match.group(8),\n 'batch_loss': match.group(9),\n 'batch_acc': match.group(10)\n }\n #print(row)\n #return match.groups()\n\n # append the dictionary to the data list\n data.append(row)\n\n line = file_object.readline()\n\n # create a pandas DataFrame from the list of dicts\n print(\"data: \", data)\n df = pd.DataFrame(data)\n print(df.ndim)\n print(df.shape)\n print(df.dtypes)\n print(\"data frame: \", df)\n # set the School, Grade, and Student number as the index\n #df.set_index(['epoch', 'valid_loss', 'valid_acc', 'train_loss', 'train_acc'], inplace=True)\n #df.set_index(['epoch'], inplace=True)\n # consolidate df to remove nans\n #df = df.groupby(level=data.index.epoch).first()\n # upgrade Score from float to integer\n df = df.apply(pd.to_numeric, errors='ignore')\n return df", "def load_tfile_data(self, path, start=None, stop=None, dss=28):\n filename = path+\".\"+str(int(self.name))\n datafile = open(filename,\"r\")\n labels = datafile.readline().strip().split()\n logger.debug(\"load_tfile_data: labels: %s\", labels)\n datafile.close()\n labels.insert(0,'DOY')\n labels.insert(0,'Year')\n logger.debug(\"load_tfile_data: new labels: %s\", labels)\n \n # colums are: Year DOY UTC Epoch Chan Tsys Int Az El Diode Level CryoTemp\n # i4 i4 S8 f8 S2 f4 f4 f4 f4 i4 f4 f4\n data = numpy.loadtxt(filename,skiprows=1,\n dtype = {'names': tuple(labels),\n 'formats': ('i4','i4','S8','f8',\n 'S2','f4','f4','f4','f4','i4','f4', 'f4')})\n return data", "def main(args):\n fn = open(args.filename,\"r+\")\n for i, line in enumerate(fn, start = 1):\n f = open(\"string_examples_%i.txt\" %i,'w+')\n check = letter_check(line.rstrip())\n if check == 0:\n print('Sequence:', line.rstrip(), ' includes letters other than A,C,T or G, please revise this sequence')\n else:\n panda = create_panda(line.rstrip())\n LingC = calculate_LC(line.rstrip())\n f.write(line)\n f.write(str(LingC))\n f.close()\n panda.to_csv('data%i.csv' %i)", "def loadtext(infile):\n warr, farr, earr=np.loadtxt(infile, usecols=(0,1,2), unpack=True)\n return create_spectrum(warr, farr, earr)", "def readtxt(obslog):\n\n logger = log.getLogger('obslog.readtxt')\n\n if not os.path.exists(obslog):\n logger.error('Cannot access %s', obslog)\n raise SystemExit\n\n logger.info('Reading %s', obslog)\n\n with open(obslog) as f: # Since we will have to go through the data twice, read the whole file at once.\n data = f.readlines()\n\n header = ['Observation ID', 'Data Labels', 'File Numbers', 'Dataset UT', 'Target Name', 'Filters', 'Slit',\n 'Grating/Wavelength', 'Camera/Prism', 'ExpTime/LNR/Coadds', 'ACQ']\n\n pattern = dict() # Enforce formatting rules to avoid parsing comments as data:\n pattern['Observation ID'] = 
re.compile(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}-[0-9]+$')\n pattern['Data Labels'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['File Numbers'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['Dataset UT'] = re.compile(r'^[0-9]{2}:[0-9]{2}:[0-9]{2}$') # 09:58:15\n pattern['Target Name'] = re.compile(r'[a-zA-Z0-9_-]+') # Match any string\n pattern['Filters'] = re.compile(r'[A-Z0-9\\-]+') # H, XD, H2, X, J, H\n pattern['Slit'] = re.compile(r'[a-zA-Z0-9]+') # 0.675, ACQ, LgPin\n pattern['Grating/Wavelength'] = re.compile(r'[0-9]{2,3}/[0-9]\\.[0-9]{2}') # 32/1.65, 111/1.68\n pattern['Camera/Prism'] = re.compile(r'[A-Z]{2}/[A-Z]{3}') # LB/MIR, SB/SXD\n pattern['ExpTime/LNR/Coadds'] = re.compile(r'[0-9]+\\.[0-9]/[0-9]+/[0-9]+') # 0.2/1/25, 300.0/32/1\n pattern['ACQ'] = re.compile(r'^Y*$') # Y or ''\n\n indx = {}\n for line in data:\n if 'Electronic Observing Log' in line:\n date = line.split()[-1][7:]\n logger.debug('Log date: %s', date)\n if line[0:14] == 'Observation ID': # This defines the start of the header row\n for h in header:\n indx[h] = line.find(h) # Find where each column starts\n break # No need to go farther\n\n width = {} # Find the width of each row\n for i in range(len(header) - 1): # This requires that 'header' be an ordered array (not a dictionary)\n width[header[i]] = indx[header[i + 1]] - indx[header[i]]\n width[header[i+1]] = 1 # The ACQ field is either 'Y' or blank\n\n val = {}\n match = {}\n info = {}\n for line in data:\n logger.debug('\\n%s', line)\n files = []\n for h in header:\n val[h] = line[indx[h]: indx[h] + width[h]].strip()\n match[h] = re.match(pattern[h], val[h])\n logger.debug('%s: \"%s\" %s' % (h, val[h], match[h]))\n\n # Maybe throw a warning if only match 1 fails; indicating a likely bad pattern specification?\n\n if None in match.values():\n logger.debug('Failed to match all patterns -> This is a comment')\n continue\n\n if '-' in val['File Numbers']:\n start, stop = val['File Numbers'].split('-')\n for i in range(int(start), int(stop)+1):\n files.append(i)\n else:\n files.append(int(val['File Numbers']))\n\n for filenum in files:\n f = 'N%sS%04d.fits' % (date, filenum)\n logger.debug('File: %s', f)\n info[f] = {}\n for h in [header[0]] + header[3:]: # Skip 'Data Labels' and \"File Numbers'\n info[f][h] = val[h]\n\n logger.debug('info: %s', info)\n return info", "def read_in_files():\n\n num_files = len([name for name in os.listdir(DATA_SOURCE) if name.endswith(\".txt\")])\n loading_section_size = num_files / 30\n count = 0\n\n sentences_as_lists = []\n for filename in os.listdir(DATA_SOURCE):\n if filename.endswith(\".txt\"):\n\n # Pretty loading bar\n print(\"Processing Files: [\", end=\"\")\n for i in range(31, -1, -1):\n if count > i * loading_section_size:\n for j in range(0, i):\n print(\"-\", end=\"\")\n sys.stdout.flush()\n for j in range(i, 30):\n print(\" \", end=\"\")\n sys.stdout.flush()\n break;\n if count == num_files:\n print(\"] \", count, end=\"\\n\")\n else:\n print(\"] \", count, end=\"\\r\")\n sys.stdout.flush()\n\n # Open the paper\n paper_to_open = DATA_SOURCE + filename\n paper = Reader().open_file_single_string(paper_to_open)\n udata = paper.decode(\"utf-8\")\n paper = udata.encode(\"ascii\", \"ignore\")\n\n # Split the data into a list of sentences, where each sentence is a list of words\n sentences = sent_tokenize(paper)\n\n for sentence in sentences:\n words = word_tokenize(sentence)\n sentences_as_lists.append(words)\n\n if DEBUG:\n print(sentences_as_lists)\n wait()\n\n 
count += 1\n\n return sentences_as_lists", "def read_FIREX_ICT_file(path, FileName):\n # Setup a manual file reader for the ICT files.\n file2use = '{}/{}'.format(path, FileName)\n # Local variables\n HeaderLineStarts = 'Time_Start, Time_Stop, Day_Of_Year_YANG, Latitude_YANG'\n Year = 2019\n FirstDayOfYear = datetime.datetime(Year, 1, 1)\n DOYvar = 'Day_Of_Year_YANG'\n StartVar = 'Time_Start'\n # Extract file by reading line by line\n with open( file2use, 'r') as OpenedFile:\n\n # Read data after the head line has been read\n ReadDataHereOnwards = False\n data = []\n for line in OpenedFile:\n line = line.strip()\n # Extract data after header\n if ReadDataHereOnwards:\n data += [line.split(',')]\n # skip lines until header for data found\n if line.startswith(HeaderLineStarts):\n header = line.split(',')\n header = [i.strip() for i in header]\n ReadDataHereOnwards = True\n\n # Compile data and header into a pd.DataFrame\n df = pd.DataFrame(data, columns=header)\n # convert columns to floats where possible\n for col in df.columns:\n df.loc[:, col] = pd.to_numeric(df[col])\n\n # Update the index to be in datetime\n dates = []\n days = df[DOYvar].values\n for idx in df.index:\n day = df.loc[idx, DOYvar]\n seconds = df.loc[idx, StartVar]\n date = FirstDayOfYear + datetime.timedelta(int(day) - 1.0)\n date = AC.add_secs(date, seconds)\n dates += [date]\n df.index = dates\n return df", "def loadatran(filename, wl=True, verbose=False):\n # 2008-08-21 09:42 IJC: Created to save myself a bit of time\n # 2008-08-25 10:08 IJC: Read in all lines at once; should go\n # faster with sufficient memory\n # 2008-09-09 13:56 IJC: Only convert the wavelength and flux\n # columns(#1 & #2) -- speed up slightly.\n if filename.__class__==list:\n returnlist = []\n for element in filename:\n returnlist.append(loadatran(element, wl=wl))\n return returnlist\n \n f = open(filename, 'r')\n dat = f.readlines()\n f.close()\n if verbose:\n print dat[0]\n print dat[0].split()\n print dat[0].split()[1:3]\n print dat[0].split()[2]\n \n if wl:\n data = array([map(float, line.split()[1:3]) for line in dat])\n else:\n data = array([float(line.split()[2]) for line in dat])\n\n return data", "def Load_Neg_Names(self, pos_file_name):\n\n #pos_filename = str(pos_file_name[0]) # Torch loader...\n pos_filename = str(pos_file_name)\n if self.text==True:\n if self.window==0:\n pattern = re.escape(pos_filename)+r\".+$\"\n elif self.window!=0:\n pattern = re.escape(pos_filename)+r\".\"+re.escape(str(self.window))+r\".*$\"\n else:\n print(\"window should be \\{0, 1, 2, 3\\}\")\n else:\n if self.window==0:\n pattern = re.escape(pos_filename)+r\".*$\"\n elif self.window!=0:\n pattern = re.escape(pos_filename)+r\".\"+re.escape(str(self.window))+r\".*$\"\n else:\n print(\"window should be \\{0, 1, 2, 3\\}\")\n\n regex = re.compile(pattern)\n dir_paths = os.walk(self.neg_dir_path) # Return Generator\n Neg_File_List = []\n for dir_path in dir_paths:\n root_dir = dir_path[0]\n file_path_list = dir_path[2]\n\n # Read only text files\n for file_name in file_path_list:\n if regex.match(file_name) is not None:\n Neg_File_List.append(file_name)\n\n return Neg_File_List", "def loadDiodeTemp(h6, filename):\n \n f_fine = h6.freqs\n f = h6.freqs_cal\n num_chans = h6.h5.root.raw_data.beam_01.cols.xx[0].shape[0]\n \n #temps_x = np.fromfile(filename_x).reshape([13,16])\n #temps_y = np.fromfile(filename_y).reshape([13,16])\n\n if filename.endswith('.hdf') or filename.endswith('.h5') or filename.endswith('.hdf5'):\n temps, tsys = mbcal(filename)\n else:\n temps 
= np.fromfile(filename).reshape([26,16])\n tsys = np.zeros_like(temps)\n\n temps_x = temps[0:13]\n temps_y = temps[13:26]\n tsys_x = tsys[0:13]\n tsys_y = tsys[13:26]\n\n temps_fine_x = np.zeros([13, num_chans])\n temps_fine_y = np.zeros([13, num_chans])\n tsys_fine_x = np.zeros([13, num_chans])\n tsys_fine_y = np.zeros([13, num_chans])\n \n for i in range(0,13):\n temps_fine_x[i] = fitLine(f, temps_x[i], num_chans)\n temps_fine_y[i] = fitLine(f, temps_y[i], num_chans)\n tsys_fine_x[i] = fitLine(f, tsys_x[i], num_chans)\n tsys_fine_y[i] = fitLine(f, tsys_y[i], num_chans)\n \n return temps_x, temps_y, tsys_x, tsys_y", "def t2_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"SubSpectrumData/\" + self.filenameparser(filename)\n self.t2_filename = filename", "def main(name, line1, line2, orbital_filename):\n #name = \"TERRA\"\n #line1 = \"1 25994U 99068A 16048.43680378 .00000258 00000-0 67198-4 0 9999\"\n #line2 = \"2 25994 98.1982 124.4247 0001352 105.3907 254.7441 14.57126067859938\"\n satellite = ephem.readtle(name, line1, line2)\n \n\n # Landsat 8\n #name = \"Landsat8\"\n #line1=\"1 39084U 13008A 16051.82349873 .00000188 00000-0 51829-4 0 9999\"\n #line2=\"2 39084 98.1988 123.2603 0001265 89.4360 270.6984 14.57110027160810\"\n #LD8 = ephem.readtle(name, line1, line2)\n \n\n sun = ephem.Sun()\n fov = np.radians(68.6)\n\n \"\"\"\n Make pandas dataframe to store swath information\n \"\"\"\n import pandas as pd\n data = {\"DateTime\": [],\"DOY\":[],\"Month\": [],\n \"orbit_id\":[], \"ground_lat\": [], \n \"ground_lon\": [], \"swath_width\": []}\n swaths = pd.DataFrame(data)\n swaths.set_index(keys=\"DateTime\")\n # generate shapefile\n\n orbit_id = 0\n # need to do splitted by hemisphere unfortunately..\n for orbit in make_an_orbit(satellite):\n #import pdb; pdb.set_trace()\n if len(orbit) > 1:\n \"\"\"\n So worth doing processing on orbit...\n\n \"\"\"\n sun = ephem.Sun()\n\n print(orbit[0].datetime)\n\n for overpass in orbit:\n overpass.only_daytime_overpasses(sun)\n overpass.derive_swath_width(fov)\n \"\"\"\n Create a tempoary dataframe for this orbit\n \"\"\"\n epoch = datetime.datetime(1970, 1, 1)\n #import pdb; pdb.set_trace()\n tmp_d = {\"DateTime\": [(o.datetime - epoch).total_seconds() for o in orbit],\n \"DOY\":[int(o.datetime.strftime('%j')) for o in orbit],\n \"Month\": [o.datetime.month for o in orbit],\n \"orbit_id\": orbit_id * np.ones(len(orbit)),\n \"ground_lat\": [o.lat for o in orbit],\n \"ground_lon\": [o.long for o in orbit],\n \"swath_width\": [o.swath_width for o in orbit]}\n tmp = pd.DataFrame(tmp_d)\n tmp.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n orbit_id +=1 \n \"\"\"\n Append to main dataframe\n \"\"\"\n swaths = swaths.append(tmp)\n #swaths.set_index(keys=\"DateTime\")\n\n \"\"\"\n Save the DataFrame to a file\n \"\"\"\n swaths = swaths.set_index(keys=\"DateTime\")\n #swaths.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n swaths.to_csv(orbital_filename, header=True)", "def ingest():\n\n base_path = '/home/mnichol3/Coding/wx-scripts/wtlma'\n\n flash_files = ['flash-out-05232019-2050.txt',\n 'flash-out-05232019-2100.txt',\n 'flash-out-05232019-2110.txt',\n 'flash-out-05232019-2120.txt',\n 'flash-out-05232019-2130.txt',\n 'flash-out-05232019-2140.txt',\n 'flash-out-05232019-2150.txt']\n\n df_cols = ['start', 'end', 'duration', 'area', 'ctr_alt', 'ctr_lat', 'ctr_lon',\n 'tot_energy']\n\n flash_df = pd.read_csv(join(base_path, flash_files[0]), sep=',', names=df_cols)\n\n for f in 
flash_files[1:]:\n curr_path = join(base_path, f)\n curr_df = pd.read_csv(curr_path, sep=',', names=df_cols)\n flash_df = pd.concat([flash_df, curr_df], ignore_index=True)\n\n return flash_df", "def loadfile():\n try:\n x = tkinter.filedialog.askopenfilename()\n except TypeError:\n return\n if not x:\n return\n y = x.split(\".\")\n if y[-1] == \"fits\":\n # TODO: this is extremely stupid and dummy. Create new function for converting\n # add proper formating etc\n hdulist = fits.open(x)\n tbdata = hdulist[1].data\n a = tbdata.field('TMID')/86400.0 + 2453005.5\n b = 15 - 2.5*numpy.log10(tbdata.field('TAMFLUX2'))\n out = \"\"\n for i in range(len(a)):\n out += str(a[i]) + \" \" * 5 + str(b[i]) + \"\\n\"\n return (x, out)\n else:\n file = open(x)\n y = file.read()\n file.close()\n s = (x, y)\n return s", "def processText(self, text: str, filename: str) :\n execution_time = 0.\n\n directory = os.path.join(self.execution_time_dir, AUDIO_DIR, self.getTTS().getName())\n make_dir(directory)\n time_for_generating_audio_fpath = os.path.join(directory, filename + \".txt\")\n \n audio_fpath = self.getTTS().getAudioPath(\n text=text, audio_dir=self.audio_dir, filename=filename)\n \n if self.recompute or not os.path.exists(audio_fpath):\n # print(audio_fpath)\n start_time = time.time()\n self.getTTS().generateAudio(text=text, audio_fpath=audio_fpath)\n save_execution_time(fpath=time_for_generating_audio_fpath, execution_time=time.time() - start_time)\n \n ## add execution time for generating audio\n execution_time += get_execution_time(\n fpath=time_for_generating_audio_fpath) \n \n transcription_dir = os.path.join(self.transcription_dir, self.getTTS().getName())\n \n transcriptions = {}\n for asr in self.asrs :\n directory = os.path.join(\n self.execution_time_dir, TRANSCRIPTION_DIR, self.getTTS().getName(), asr.getName())\n make_dir(directory)\n time_for_recognizing_audio_fpath = os.path.join(\n directory, filename + \".txt\")\n\n if self.recompute :\n start_time = time.time()\n # TODO: \n # change recognize audio -> input audio instead of fpath\n # audio = asr.loadAudio(audio_fpath=audio_fpath)\n # transcription = asr.recognizeAudio(audio=audio)\n # asr.saveTranscription(transcription_fpath, transcription)\n transcription = asr.recognizeAudio(audio_fpath=audio_fpath)\n asr.setTranscription(transcription)\n asr.saveTranscription(transcription_dir=transcription_dir, filename=filename)\n save_execution_time(fpath=time_for_recognizing_audio_fpath, execution_time=time.time() - start_time)\n \n transcription = asr.loadTranscription(\n transcription_dir=transcription_dir, filename=filename)\n num_retry = 0\n while transcription == \"\" and num_retry < self.max_num_retry :\n start_time = time.time()\n asr.recognizeAudio(audio_fpath=audio_fpath)\n asr.saveTranscription(\n transcription_dir=transcription_dir, filename=filename)\n save_execution_time(\n fpath=time_for_recognizing_audio_fpath, execution_time=time.time() - start_time)\n transcription = asr.loadTranscription(\n transcription_dir=transcription_dir, filename=filename)\n\n if asr.getName() == \"wit\" :\n random_number = float(random.randint(9, 47))/10.\n time.sleep(random_number)\n\n num_retry += 1\n\n transcriptions[asr.getName()] = preprocess_text(transcription)\n\n ## add execution time for generating audio\n execution_time += get_execution_time(\n fpath=time_for_recognizing_audio_fpath) \n \n\n cases = self.caseDeterminer(text, transcriptions)\n # if sum(cases.values()) == 0 :\n # print(text)\n # print(transcriptions[\"wav2vec2\"])\n # 
print(cases)\n # print()\n \n for asr_name, case in cases.items() :\n self.saveCase(self.case_dir, self.getTTS().getName(), asr_name, filename, str(case))\n\n # print(f\"Execution time: {execution_time}\")\n return cases, execution_time", "def t7_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"SubSpectrumData/\" + self.filenameparser(filename)\n self.t7_filename = filename", "def load_raw_text():\n if not os.path.exists( os.path.join( DATA_HOME, RAW_TEXT_FILE ) ) or \\\n not os.path.exists( os.path.join( DATA_HOME, LABELS_FILE ) ):\n print( 'no prior files found. staring from scratch' )\n rev, rat = parse_json( os.path.join( DATA_HOME, JSON_FILE ) )\n y = np.array( rat )\n print( 'saving data to files' )\n pickle.dump( rev , open( os.path.join( DATA_HOME, RAW_TEXT_FILE ), 'wb' ) )\n pickle.dump( y , open( os.path.join( DATA_HOME, LABELS_FILE ), 'wb' ) )\n else:\n print( 'found raw text and labes. loading...' )\n rev = pickle.load( open( os.path.join( DATA_HOME, RAW_TEXT_FILE ), 'rb' ) )\n y = pickle.load( open( os.path.join( DATA_HOME, LABELS_FILE ), 'rb' ) )\n print( 'done' )\n \n return rev, y", "def entries_from_goes_ts_files(*files, default_waveunit=None, source=None):\n\n\n \"\"\"\n ts_goes = ts.TimeSeries(file)\n statinfo = os.stat(file)\n entry = DatabaseEntry(path=file)\n entry.size = statinfo.st_size\n\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'\n entry.instrument = ts_goes.meta.get('TELESCOP').values()\n entry.instrument = ts_goes.meta.get('TELESCOP').values()\n\n entry.wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n entry.wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n #\n entry.observation_time_start = ts_goes.meta.get('date-beg').values()[0]\n entry.observation_time_end = ts_goes.meta.get('date-end').values()[0]\n\n entry.metadata = ts_goes.meta.metadata[0][2]\n\n #entry.tags = [ sunpy.database.attrs.Tag('raw') ]\n \"\"\"\n\n\n for file in files:\n headers = fits.get_header(file)\n if isinstance(file, (str, six.text_type)):\n filename = file\n else:\n filename = getattr(file, 'name', None)\n statinfo = os.stat(file)\n #print('a header')\n entry = DatabaseEntry(path=filename)\n entry.size = statinfo.st_size\n\n # Add/tweak start/end entries for GOES\n if headers[0].get('TELESCOP','') != '':\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 
'GOES 6' instead 'X-ray Detector'\n entry.instrument = headers[0]['TELESCOP']\n elif headers[1].get('TELESCOP','') != '':\n entry.instrument = headers[1]['TELESCOP']\n if (headers[0].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[0]['DATE-OBS'])\n elif (headers[1].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[1]['DATE-OBS'])\n\n if (headers[0].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[0]['DATE-END'])\n elif (headers[1].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[1]['DATE-END'])\n else:\n end_time = start_time + timedelta(days=1,seconds=-1)\n\n # Add these to the entry\n entry.observation_time_start = start_time\n entry.observation_time_end = end_time\n\n entry.wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n entry.wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n if source:\n entry.source = source\n\n entry.metadata = MetaDict(headers[1])\n #entry.tags = sunpy.database.attrs.Tag('raw')\n\n #entry = DatabaseEntry(instrument='EIT', wavemin=25.0)\n\n #return entry\n yield entry", "def readRyan(inDir, inSuffix, startTime, endTime):\n\t\n\tnumTrackTimes = 0\n\ttotNumCells = 0\n\tstormCells = {} \n\tdates = []\n\t\n\t# Read in Ryan files\n\tfor root, dirs, files in os.walk(inDir):\n\t\tif inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue\n\t\tfor trackFile in files:\n\t\t\tif trackFile.endswith('.data'):\n\t\t\t\t\n\t\t\t\t# Skip hidden files\n\t\t\t\tif trackFile.startswith('._'): continue\n\t\t\t\t\n\t\t\t\t# Check if file falls in date range\n\t\t\t\ttry:\n\t\t\t\t\tfileDate = datetime.datetime.strptime(str(trackFile).split('_')[0], '%Y-%m-%d-%H%M%S')\n\t\t\t\texcept ValueError:\n\t\t\t\t\tprint('File ' + str(trackFile) + ' has an invalid name. Expected format YYYY-MM-DD-hhmmss_...')\n\t\t\t\t\tcontinue\n\t\t\t\tif not startTime <= fileDate < endTime:\n\t\t\t\t\tcontinue\n\t\t\t\tif fileDate.date() not in dates: dates.append(fileDate.date())\n\t\t\t\t\n\t\t\t\t# Open file\n\t\t\t\tf = open(root + '/' + trackFile)\n\t\t\t\tlines = f.readlines()\n\t\t\t\tf.close()\n\t\t\t\t\n\t\t\t\t# Skip probSevere files\n\t\t\t\tif int(lines[28].split()[0]) == 1:\n\t\t\t\t\tprint('\\nWARNING: Unable to process storm objects from probSevere in Ryan format. 
Use \"-t probsevere\" instead.')\n\t\t\t\t\tprint(str(trackFile) + ' will be skipped.\\n')\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tprint(trackFile)\n\t\t\t\tnumTrackTimes += 1\n\t\t\t\t\n\t\t\t\t# Get Individual cell metadata\n\t\t\t\tcells = lines[32::5]\n\t\t\t\tnumCells = len(cells)\n\t\t\t\t\n\t\t\t\tfor cell in cells:\n\t\t\t\t\tcell = cell.split()\n\t\t\t\t\tcellID = totNumCells\n\t\t\t\t\tstormCells[cellID] = {'time':fileDate, 'lat':float(cell[0]), 'lon':float(cell[1]), 'latr':float(cell[3]), \n\t\t\t\t\t\t\t\t\t\t\t'lonr':float(cell[4]), 'orientation':float(cell[8]), 'track':str(cell[9]) + '_' + str(fileDate.date()), 'old_track': str(cell[9])} \n\t\t\t\t\ttotNumCells += 1\n\t\t\t\t\t\t\n\treturn [stormCells, totNumCells, numTrackTimes, dates]", "def readin (filename, hdu=0, non_std_fits=False,\n text_comments='#', text_skiprows=0, get_data=False, verbose=False,\n apply_WCS_rv=False):\n multi_order_txt = False\n use_naxis2='all'\n use_naxis3='all'\n \n \n preferred_wlsoln=None # !! need to fix this\n # !! should also be able to input wavelength solution?\n \n if preferred_wlsoln is not None: preferred_wlsoln = wlsolvefxn.get_func_name(preferred_wlsoln)\n \n #### check if file exists ####### #############\n if not os.path.exists(filename): raise IOError(\"File does not exist:'\"+filename+\"'\")\n\n\n #### check if file is text############# \n np_kwargs = {'comments':text_comments,\n 'skiprows':text_skiprows}\n is_text_file, txt_data = check_for_txt_format(filename,**np_kwargs)\n\n #### if it is a text file ######################\n if is_text_file:\n spec_obj = readin_txt(filename,txt_data,get_data) \n return spec_obj \n\n #### now check how it behaves as a fits file\n if non_std_fits: hdulist = pyfits.open(filename)\n else:\n # give standard pyfits readin a try\n try: hdulist = pyfits.open(filename)\n except: raise IOError(\"PYFITS DOES NOT LIKE THE FILE YOU GAVE ('\"+filename+\"'), TO SEE WHAT ERROR IT GIVES TRY: hdulist = pyfits.open('\"+filename+\"')\")\n\n\n #### open up fits file ##############################\n hdulist = pyfits.open(filename)\n\n # select which header unit ot use\n if len(hdulist) > 1: \n hdu = int(hdu)\n hdu = np.clip(hdu,0,len(hdulist)-1)\n else: hdu = 0\n\n # specify the current header unit\n header_unit = hdulist[hdu]\n prihdr = header_unit.header\n\n # can display some useful information \n if verbose: \n print \"=\"*60\n print (hdulist.info(),'\\n')\n if len(hdulist) > 1:\n print \"=\"*20+\" USING HEADER: \"+\"=\"*20\n print repr(hdulist[hdu])\n\n ##### fill in the data class\n # not get header info of relevance\n simple = query_fits_header(prihdr,'SIMPLE',noval=False)\n xtension = query_fits_header(prihdr,'XTENSION')\n if simple.found:\n if not simple.val: print \"HeadsUp: Header Keyword SIMPLE is False, you may encounter unexpected behavior\"\n else:\n if not xtension.found: print \"HeadsUp: No extension keyword found in headers, you may encounter unexpected behavior\"\n \n \n #### read in important information from header, if present\n ibits = query_fits_header(prihdr,'BITPIX') # how many bits per pixel in the data? 
Not currently necessary, numpy will adapt\n \n naxis = query_fits_header(prihdr,'NAXIS' ,noval=0) # how many dimenstions?\n naxis1 = query_fits_header(prihdr,'NAXIS1',noval=0) # number of points per order\n naxis2 = query_fits_header(prihdr,'NAXIS2',noval=0) # number of orders\n naxis3 = query_fits_header(prihdr,'NAXIS3',noval=0) # number of different spectra\n\n apformat = query_fits_header(prihdr,'APFORMAT')\n if apformat.found: print \"WARNING: I'M NOT SURE HOW TO DEAL WITH APFORMAT VALUES\" # !! though I think it's just the spec files\n\n if not naxis.found: raise IOError(\"ERROR: Keyword NAXIS not found\")\n\n bzero = query_fits_header(prihdr,\"BZERO\",noval=0)\n bscale = query_fits_header(prihdr,\"BSCALE\",noval=1)\n\n ###### read in data ##############################################\n data = header_unit.data\n\n if data is None:\n wl, data, inv_var = np.zeros(3).reshape((3,1))\n if get_data: return (wl,data,inv_var)\n else: return eyeSpec_spec(wl,data,inv_var,header_unit.header)\n else:\n # check that data matches up with at least one of the dimensions\n if data.ndim != naxis.val: raise ValueError(\"Dimension of data \"+str(data.ndim)+\" does not match keyword naxis \"+str(naxis.val))\n \n statement = 'Dimension does not match data.shape = '+str(data.shape)+\" fits file (naxis1, naxis2, naxis3) \"+str(tuple([naxis1.val,naxis2.val,naxis3.val]))\n if data.ndim == 1: \n assert data.shape == (naxis1.val,) , statement\n data = data.reshape((1,1,)+data.shape)\n \n elif data.ndim == 2: \n assert data.shape == (naxis2.val, naxis1.val), statement\n data = data.reshape((1,)+data.shape) \n \n elif data.ndim == 3: \n assert data.shape == (naxis3.val, naxis2.val, naxis1.val), statement\n \n ##### Determine the which data is useful \n # which orders to read in \n nband = np.arange(data.shape[0])+1\n nord = np.arange(data.shape[1])+1\n\n \n ##### Calculate the wavelengths for the data\n # set up wavelength and inverse_variance\n wl = np.ones(data.shape)\n \n # get the wavelength coefficients\n wlcoeff = wlsoln_coeff_from_header(header_unit.header, apply_WCS_rv, preferred_wlsoln)\n \n # the same wavelength solution is applied to all bands so just pick the first and broadcast\n band = 0\n priv_info = {}\n \n # go through all the orders\n do_progress = True\n progressive_pt = 1 # this will advance and be used when there is no wavelength solution\n for i in xrange(len(nord)):\n order_i = nord[i]\n\n # get the coefficients and function type \n equ_type = wlcoeff.get_equation_type()\n if equ_type in ['none',None,'no solution'] and do_progress: \n coeff = [progressive_pt,1]\n equ_type = 'pts'\n else: coeff = wlcoeff.get_coeffs(order_i)\n \n # pts[0] = 1 :: this was definitely the right thing to do for SPECTRE's 1-D output but may not be for other equations, may need pts[0]=0, this may be for bzero,bscale\n pts = np.arange(len(wl[0][i]))+1 \n # apply function\n wl[0][i] = wlsolvefxn(pts, coeff, equ_type) \n \n progressive_pt += len(pts)\n \n for j in xrange(len(nband)): \n band_j = nband[j]\n if (band_j,order_i) not in priv_info: priv_info[(band_j,order_i)] = {} \n # record the private information\n priv_info[(band_j,order_i)]['disp']= [coeff, equ_type]\n priv_info[(band_j,order_i)]['rv'] = [0] \n priv_info[(band_j,order_i)]['disp extr'] = deepcopy(wlcoeff.extra)\n \n # now propogate the solution to the other bands\n stdwl = wl[0]\n for i in xrange(1,len(nband)): wl[i] = stdwl \n \n inv_var = np.ones(data.shape)\n #=================================================================#\n # return the data 
.OR. go on and create the spec_obj\n if get_data: return (wl, data, inv_var)\n\n #=================================================================# \n spec_obj = eyeSpec_spec(wl,data,inv_var,header_unit.header)\n # set up private information\n priv_info['filename'] = filename\n spec_obj.filename = filename\n \n bands = np.array(np.arange(1,len(data)+1),dtype=str)\n band_info = {}\n i = -1\n for key in prihdr.keys():\n if key[:6] != 'BANDID': continue\n if i < len(bands):\n i+=1\n bands[i] = prihdr[key]\n band_info[key] = prihdr[key]\n else: raise IOError(\"MORE BANDID KEYWORDS IN HEADER THAN FIRST DIMENSION OF DATA\") \n\n # add band info if available:\n if len(band_info) != 0: priv_info['bandids'] = band_info\n else: priv_info['bandids'] = None\n \n # match up the private info created during read in to the spec_obj\n for key in priv_info: spec_obj._private_info[key] = priv_info[key]\n \n # map fits value => acutal index\n # spec_obj._bands = {}\n # spec_obj._orders = {}\n # for i in range(len(nspec)): spec_obj._bands[nspec[i]] = i\n # for i in range(len(nord)): spec_obj._orders[nord[i]] = i\n # \n \n if 7 in nband: spec_obj.set_band(6) # this is where Magellian data stores it's object data, i.e. BANDID7 which is index 6\n\n if len(hdulist) > 1: spec_obj.hdrlist = [h.header for h in hdulist]\n \n return spec_obj", "def l1a_filenames(pattern=None, **kwargs):\n return get_filenames('l1a', pattern=pattern, **kwargs)", "def load_line_lists(lamps, unknown=False, skip=False, all=False, NIST=False,\n restrict_on_instr=None):\n # All?\n if all:\n ### Also search the cache for linelists\n line_files = glob.glob(os.path.join(data.Paths.linelist, '*_lines.dat'))\n lamps = []\n for line_file in line_files:\n i0 = line_file.rfind('/')\n i1 = line_file.rfind('_')\n lamps.append(line_file[i0+1:i1])\n\n msgs.info(f\"Arc lamps used: {', '.join(lamps)}\")\n # Read standard files\n lists = []\n for lamp in lamps:\n if NIST:\n line_file = os.path.join(data.Paths.nist, f'{lamp}_vacuum.ascii')\n else:\n line_file = os.path.join(data.Paths.linelist, f'{lamp}_lines.dat')\n if not os.path.isfile(line_file):\n if not skip:\n line_files = glob.glob(os.path.join(data.Paths.linelist, '*_lines.dat'))\n all_list = [os.path.split(ll)[1].replace(\"_lines.dat\", \"\") for ll in line_files]\n msgs.warn(\"Input line {:s} is not included in arclines\".format(lamp))\n msgs.info(\"Please choose from the following list:\" + msgs.newline() +\n \",\".join(all_list))\n import pdb; pdb.set_trace()\n raise IOError(\"Cannot continue without list\")\n else:\n lists.append(load_line_list(line_file, NIST=NIST))\n # Stack\n if len(lists) == 0:\n return None\n line_lists = vstack(lists, join_type='exact')\n\n # Restrict on the spectrograph?\n if restrict_on_instr is not None:\n instr_dict = defs.instruments()\n gdI = (line_lists['Instr'] & instr_dict[restrict_on_instr]) > 0\n line_lists = line_lists[gdI]\n \n # Unknown\n if unknown:\n unkn_lines = waveio.load_unknown_list(lamps)\n unkn_lines.remove_column('line_flag') # may wish to have this info\n # Stack\n line_lists = vstack([line_lists, unkn_lines])\n\n # Return\n return line_lists", "def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n 
columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels", "def load_timestamps(data_path):\n timestamp_file = os.path.join(data_path, 'oxts', 'timestamps.txt')\n\n # Read and parse the timestamps\n timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = datetime.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n timestamps.append(t)\n return timestamps", "def _parse_synth(path):\n files, coords, angles = [], '', ''\n with open(f'{path}paths.txt') as f:\n while True:\n line = f.readline()\n if not line:\n break\n else:\n files.append(line.rstrip('\\n'))\n return files", "def t8_loadFile(self):\n print \"spectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = self.filenameparser(filename)\n self.t8_filename = filename", "def load_data(path):\n with open(path) as f:\n return f.readlines()", "def time_calibration(input_file):\n original_path = os.getcwd()\n save_path = input_file['save_path']\n #change to save data reduction directory\n os.chdir(save_path)\n print '\\n Reading the list of images ....\\n'\n planet = input_file['exoplanet'] #set exoplanet name\n images = sorted(glob.glob('AB'+planet+'*.fits'))\n print images\n #include de RA,DEC and epoch of the exoplanet\n RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']\n #obtain ST JD using iraf task and introduce in the header\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):\n new_date = use.yesterday(hdr['date-obs'])\n #print images[i], new_date\n else:\n new_date = hdr['date-obs']\n year,month,day = split(new_date,'-')\n iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])\n JD = iraf.asttimes.jd #obtain julian date\n LMST = iraf.asttimes.lmst #obtain the sideral time\n LMST = use.sexagesimal_format(LMST) #convert sideral time in sexagesimal format\n iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header\n iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sideral time in the header\n iraf.ccdhedit(images[i],'JD',JD,type='string') #include de julian date in the header\n #include RA, and DEC of the object in your header\n iraf.ccdhedit(images[i],\"RA\",RA,type=\"string\") #include right ascention in the header\n iraf.ccdhedit(images[i],\"DEC\",DEC,type=\"string\") #include declination in the header\n iraf.ccdhedit(images[i],\"epoch\",epoch,type=\"string\") #include epoch in the header\n # use.update_progress((i+1.)/len(images))\n print '\\n Setting airmass ....\\n'\n for i in range(len(images)):\n print '# ',images[i]\n #iraf.hedit(images[i],'airmass',airmass,add='yes')\n #iraf.hedit(images[i],'HJD',HJD,add='yes')\n iraf.setairmass.observatory = input_file['observatory']\n iraf.setairmass(images[i])\n iraf.setjd.time = 'ut'\n iraf.setjd(images[i])\n print '\\n.... 
done.\\n'\n #export information\n hjd, jd, airmass, st = [],[],[],[]\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n hjd.append(hdr['HJD'])\n jd.append(hdr['JD'])\n airmass.append(hdr['airmass'])\n st.append(hdr['st'])\n #saving the data\n data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T\n data.columns = ['HJD','JD','ST','Airmass']\n data.to_csv('results_iraf_calibrations.csv')\n #change to workings directory\n os.chdir(original_path)\n return", "def readProbSevere(inDir, inSuffix, startTime, endTime):\n\t\n\tnumTrackTimes = 0\n\ttotNumCells = 0\n\tstormCells = {} \n\tdates = []\n\t\n\t# Read in ProbSevere files\n\tfor root, dirs, files in os.walk(inDir):\n\t\tif inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue\n\t\tfor trackFile in files:\n\t\t\tif trackFile.endswith('.ascii'):\n\t\t\t\t\n\t\t\t\t# Skip hidden files\n\t\t\t\tif trackFile.startswith('._'): continue\n\t\t\t\t\n\t\t\t\t# Check if file falls in date range\n\t\t\t\ttry:\n\t\t\t\t\tdate = str(trackFile).split('.')[0].split('_')[3]\n\t\t\t\t\ttime = str(trackFile).split('.')[0].split('_')[4]\n\t\t\t\t\tfileDate = datetime.datetime.strptime(date + '_' + time, '%Y%m%d_%H%M%S')\n\t\t\t\texcept ValueError:\n\t\t\t\t\tprint('File ' + str(trackFile) + ' has an invalid name. Expected format SSEC_AWIPS_PROBSEVERE_YYYYMMDD_hhmmss.ascii...')\n\t\t\t\t\tcontinue\n\t\t\t\tif not startTime <= fileDate < endTime:\n\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\tif fileDate.date() not in dates: dates.append(fileDate.date())\n\t\t\t\t\t\n\t\t\t\t# Open file\n\t\t\t\tf = open(root + '/' + trackFile)\n\t\t\t\tlines = f.readlines()\n\t\t\t\tf.close()\n\t\t\t\t\n\t\t\t\tprint(trackFile)\n\t\t\t\tnumTrackTimes += 1\n\t\t\t\t\n\t\t\t\tfor line in lines:\n\t\t\t\t\tif line.startswith('Valid:'): continue\n\t\t\t\t\tdata = str(line).split(':')\n\t\t\t\t\tlats = list(map(float, data[7].split(',')[0::2]))\n\t\t\t\t\tlons = list(map(float, data[7].split(',')[1::2]))\n\t\t\t\t\ttrack = data[8]\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tlatr = (max(lats) - min(lats)) / 2.\n\t\t\t\t\tlonr = abs(max(lons) - min(lons)) / 2.\n\t\t\t\t\t\n\t\t\t\t\t# Calculate centroid\n\t\t\t\t\tpoints = []\n\t\t\t\t\tfor i in range(0, len(lats)):\n\t\t\t\t\t\tpoints.append((lons[i], lats[i]))\n\t\t\t\t\tpoly = Polygon(points)\n\t\t\t\t\t\n\t\t\t\t\tlon = poly.centroid.x\n\t\t\t\t\tlat = poly.centroid.y\n\t\t\t\t\t\n\t\t\t\t\tcellID = totNumCells\n\t\t\t\t\tstormCells[cellID] = {'time': fileDate, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon, \n\t\t\t\t\t\t\t\t\t\t\t'orientation': 'NaN', 'track': track + '_' + str(fileDate.date()), 'old_track': track}\n\t\t\t\t\ttotNumCells += 1\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\treturn [stormCells, totNumCells, numTrackTimes, dates]", "def eeg_readswf(file):\t\t\n\tf=open(file,'r')\t\n\tfirstline = f.readline() # ntpts TSB info etc\n\tstr = string.split(firstline)\n\tntpts = int(str[1])\t\n\ttsb = float(str[3])\n\tdi = float(str[5])\t\n\ttim = np.arange(tsb,ntpts*di+tsb,di)\t\n\tline = f.readline()\t\n\tstr = string.split(line)\n\teeg0 = np.array(map(float,str[1:]))\n\tline = f.readline()\t\n\tstr = string.split(line)\n\teeg1 = np.array(map(float,str[1:]))\n\teeg = np.zeros([2,ntpts])\n\teeg[0,:]=eeg0\n\teeg[1,:]=eeg1\n\treturn [eeg,tim,ntpts]", "def generate_candidiates(caption_filename):\n reprog = re.compile(r\"\"\"/tv/20\\d\\d/20\\d\\d-\\d\\d/20\\d\\d-\\d\\d-\\d\\d/\n (20\\d\\d)-(\\d\\d)-(\\d\\d)_(\\d\\d)(\\d\\d)_(.+)\"\"\", re.I | re.X)\n candidateList = []\n utcTime = 
datetime.datetime(2000, 1, 1, 0, 0, tzinfo=timezone.Utc)\n\n m = reprog.match(caption_filename)\n if not m:\n print(\"Caption filename is not well-formated.\")\n return []\n\n g = m.groups()\n if not (len(g) == 6):\n print(\"Caption filename is not well-formated.\")\n return []\n\n utcTime = utcTime.replace(year=int(g[0]),\n month=int(g[1]),\n day=int(g[2]),\n hour=int(g[3]),\n minute=int(g[4]))\n\n for deltaHour in range(-1, 5):\n newUtcTime = utcTime + datetime.timedelta(hours=deltaHour)\n\n # match all CNN file within the time range.\n candidate_pattern = \"/tv/{0:%Y/%Y-%m/%Y-%m-%d/%Y-%m-%d_%H%M}_US_CNN_*.txt\".format(\n newUtcTime)\n candidateList.extend(glob.glob(candidate_pattern))\n\n return candidateList\n\n #/tv/2012/2012-01/2012-01-05/2012-01-05_2100_US_CNN_Situation_Room.txt\n #/tvspare/transcripts/CNN-automated/2012/2012-01/2012-01-05/2012-01-05_2100_US_CNN_Situation_Room.rawtxt", "def load_hrm_txt(filename):\n df = pd.read_csv(filename, header=None, sep='\\t')\n x = df.values\n times = x[:, 0]\n marks = x[:, 1]\n pres = x[:, 2:].T\n\n # sometimes there is an extra column of NANs at the end, so remove it\n if np.sum(np.isnan(pres[-1])) == len(times):\n pres = pres[:-1, :]\n\n return times, marks, pres", "def get_times(traj_num_str):\n times = []\n # Get timestamps of sequence\n times_file_path = \"./dataset/\" + traj_num_str.zfill(2) + \"/times.txt\"\n with open(times_file_path, \"r\") as fid:\n for i, line in enumerate(fid):\n times.append(float(line))\n return times", "def load_timestamps_img(data_path):\n timestamp_file = os.path.join(data_path, 'image_00', 'timestamps.txt')\n\n # Read and parse the timestamps\n timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = datetime.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n timestamps.append(t)\n return timestamps", "def loadFolder(self, path):\n for file_name in os.listdir(path):\n if (file_name.split(\".\")[-1] == \"txt\"):\n file_path = path + \"/\" + file_name\n self.loadFile(file_path)", "def read_data_nmt():\n data_dir = download_extract('fra-eng')\n with open(os.path.join(data_dir, 'fra.txt'), 'r') as f:\n return f.read()", "def parse(file_name, user, agenda_type, db):\n db.execute('''select section from roll where onyen=%(onyen)s''', dict(onyen=user))\n row = db.fetchone()\n section = None if row is None else row.section\n\n # Get Recitation zoom\n db.execute(\"\"\"select url from zoom where type='recitation'\"\"\")\n row = db.fetchone()\n recitation_zoom_url = row.url if row else None\n\n # Get lecture zoom\n lecture_zoom_urls = []\n if section in ['001', '003']:\n db.execute(\"\"\"select url from zoom where type='lecture' and section='001'\"\"\")\n lecture_zoom_urls.append(db.fetchone().url)\n if section in ['002', '003']:\n db.execute(\"\"\"select url from zoom where type='lecture' and section='002'\"\"\")\n lecture_zoom_urls.append(db.fetchone().url)\n\n # Get checklist information\n checklist_info = get_checklist_info(db, user, agenda_type)\n\n if agenda_type == 'la':\n first_day_of_class = date(2021, 1, 12)\n else:\n first_day_of_class = date(2021, 1, 19)\n last_day_of_classes = date(2021, 5, 5)\n today = date.today()\n with open(file_name, \"rt\") as fp:\n agenda = fp.read().split(\"\\n| \")\n day = first_day_of_class\n result = []\n for one_days_details in agenda:\n lines = 
one_days_details.split(\"\\n\")\n title = lines[0]\n output_lines = []\n for line in lines[1:]:\n if line.startswith(\"S \"):\n line = slide_line(line)\n elif line.startswith(\"#\"):\n line = comment_line(line, user)\n elif line.startswith(\"Z\"):\n line = zoom_line(line, day, section, lecture_zoom_urls, recitation_zoom_url)\n elif line.startswith(\"CL\"):\n line = checklist_line(line, day, checklist_info)\n output_lines.append(line)\n when = set_when(day, today)\n\n result.append(\n {\"date\": day, \"title\": title, \"when\": when,\n \"body\": renderMarkdown(renderTemplate(\"\\n\".join(output_lines)))})\n day = increment_day(day, last_day_of_classes, result, agenda_type)\n return result", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 
'pu_peaks': pu_peaks_x}", "def _load_time_series(self, path: str) -> np.ndarray:\n items = []\n previous = None\n for item in sorted(pathlib.Path(path).glob(\"*.nc\")):\n with xr.open_dataset(item) as ds:\n current = ds.ocean_time.values[0].astype(\"datetime64[M]\")\n if (previous is not None\n and (current - previous != np.timedelta64(1, \"M\"))):\n raise ValueError(\"Time series not continuous\")\n items.append((current, str(item)))\n previous = current\n length = max(len(item[1]) for item in items)\n return np.array(\n items,\n dtype={\n \"names\": (\"date\", \"path\"),\n \"formats\": (\"datetime64[M]\", f\"U{length}\"),\n },\n )", "def load_sotu_data():\n sotu_files = glob.glob(\"sotu-data/*.txt\")\n path_desc = re.compile(r\"sotu-data/([A-Za-z]+)_([0-9]{4})\\.txt\")\n for filepath in sotu_files:\n with open(filepath, \"r\") as f:\n raw_text = f.read()\n pres, year = path_desc.search(filepath).groups()\n yield {\"president\": pres, \"year\": year, \"speech\": raw_text}", "def process_input_file(filename):\n f = open(filename, 'r')\n\n rows = []\n i = 0\n for line in f:\n # skip optimal steps and time limit\n if i > 1 and len(line.strip()) > 0:\n rows.append(list(line.strip()))\n i += 1\n\n f.close()\n\n row_len = len(rows[0])\n num_rows = len(rows)\n\n return LaserTankMap(row_len, num_rows, rows)", "def read_data(self, path):\n if self.data_format == 'twenty': \n length = 20\n else: raise ValueError(\"self.data_format = '%s' unknown.\" % \n self.data_format)\n data = []\n with open(path,'r') as f:\n for line in f:\n data.append([float(line[k:(k + length)]) for k in range(\n 0, len(line.strip('\\n')),length)])\n return np.array(data)", "def silence_intervals(file_path,file_name):\r\n nsil_start_time=[]\r\n nsil_end_time=[]\r\n sil_start_time=[]\r\n sil_end_time=[]\r\n #read file \r\n audio, sample_rate = librosa.load(os.path.join(file_path,file_name))\r\n \r\n #silence extraction using librosa\r\n nsil_intv=librosa.effects.split(audio, top_db=30).astype('float32') / sample_rate\r\n \r\n #silence extraction using pyAudioanalysis\r\n # [Fs, x] = aIO.readAudioFile(os.path.join(file_path,file_name))\r\n # nsil_intv = np.array(aS.silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow = 0.7, Weight = 0.3, plot = False))\r\n # print \"non-sil segments=\"+str(nsil_intv)\r\n\r\n #silence detection using webrtcvad (voice activity detection)\r\n #nsil_intv=np.array(vad_webrtcvad(file_path,file_name))\r\n\r\n\r\n dur=librosa.get_duration(y=audio, sr=sample_rate)\r\n print nsil_intv\r\n print dur\r\n print sample_rate\r\n curr_sil_start=0.0\r\n curr_sil_end=0.0\r\n for i in range(nsil_intv.shape[0]):\r\n nsil_start_time.append(nsil_intv[i][0])\r\n #sil_start_time=list(np.array(sil_start_time)/sample_rate)\r\n\r\n nsil_end_time.append(nsil_intv[i][1])\r\n #sil_end_time=list(np.array(sil_end_time)/sample_rate)\r\n\r\n for i in range(len(nsil_start_time)):\r\n curr_sil_end=nsil_start_time[i]\r\n sil_start_time.append(str(curr_sil_start))\r\n sil_end_time.append(str(curr_sil_end))\r\n curr_sil_start=nsil_end_time[i]\r\n\r\n print sil_start_time\r\n print sil_end_time\r\n return sil_start_time,sil_end_time", "def read_txt(path):\n mz = []\n i = []\n with open(path) as f:\n for line in f:\n line = line.split()\n mz.append(float(line[0]))\n i.append(float(line[1]))\n return mz, i", "def read_in_celex_lines(path):\n return [line.strip().split(\"\\\\\") for line in open(path, \"r\").readlines()]", "def t4_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = 
\"SubSpectrumData/\" + self.filenameparser(filename)\n self.t4_filename = filename", "def __init__(self):\n self.file_name = 'moes_tavern_lines.txt'\n self.path_to_file = abspath(join(getcwd(), '../data',\n self.file_name))", "def loadtext2(infile):\n warrsn, farrsn =np.loadtxt(infile, usecols=(0, 1), unpack=True)\n return create_spectrum(warrsn, farrsn)", "def open_text_file(filepath):\n sentences = []\n sentencemanager = nmea.NMEASentenceManager()\n for line in open_file_generator(filepath):\n sentencemanager.process_sentence(line)\n sentences.append(line)\n return sentencemanager, sentences" ]
[ "0.5803808", "0.57068276", "0.56613946", "0.5642458", "0.562522", "0.55623674", "0.55397063", "0.5512734", "0.5485281", "0.5476176", "0.5475341", "0.54747045", "0.54190755", "0.54162836", "0.53791934", "0.5370917", "0.5362841", "0.535579", "0.5337363", "0.5330271", "0.53223175", "0.532207", "0.53213865", "0.5317907", "0.5317004", "0.5313872", "0.53128654", "0.5287831", "0.5282043", "0.52745587", "0.5271572", "0.5270309", "0.5263404", "0.52541393", "0.52529263", "0.5242862", "0.52327514", "0.5227481", "0.52255", "0.5205649", "0.52040666", "0.52030426", "0.5202868", "0.5196932", "0.51769024", "0.51644665", "0.515889", "0.5158677", "0.515657", "0.5147399", "0.5141294", "0.51378286", "0.51339924", "0.51287717", "0.51267195", "0.5126356", "0.5124755", "0.51146823", "0.51140064", "0.5113864", "0.51123327", "0.5108932", "0.51048625", "0.510461", "0.51002073", "0.5097932", "0.50939167", "0.50921595", "0.50791556", "0.50718445", "0.5064549", "0.5064461", "0.5064121", "0.5063254", "0.5055697", "0.5055247", "0.50524837", "0.5051233", "0.5046656", "0.5046552", "0.5044461", "0.50436574", "0.50427485", "0.503797", "0.50346667", "0.5034658", "0.5029082", "0.5024581", "0.5015689", "0.50154644", "0.50112957", "0.50108284", "0.5005928", "0.49985012", "0.49961165", "0.49959284", "0.49937293", "0.49928355", "0.4992782", "0.4987725", "0.49783245" ]
0.0
-1
Test case for get_chain_by_id
def test_get_chain_by_id(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_chains(self):\n pass", "def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None", "def test_solareclipses_id_get(self):\n pass", "def sample_chains():\n c = chain(add.s(1, 1), add.s(1), add.s(1))\n res = c()\n print(res.get())\n print(res.parent.get())\n print(res.parent.parent.get())", "def chain(self, chain_id, model_num = 0):\n return self.struct[model_num][chain_id]", "def test_workflows_id_get(self):\n pass", "def get_chain(self, chain_id):\n if self.default_model is None:\n return None\n if self.default_model.chain_dict.has_key(chain_id):\n return self.default_model.chain_dict[chain_id]\n return None", "def test_get_case_by_id(self):\n pass", "def test_liechtensteinsettlements_id_get(self):\n pass", "def test_coupledmodels_id_get(self):\n pass", "def test_prefectures_id_get(self):\n pass", "def create_chain(self, _id, config):\n chain = Chain()\n config[\"instances\"] = self.instances\n chain.setup(_id,config)\n \n return chain", "def get_message_chain(self, request_id):\n logger.debug('request_id = %s' % request_id)\n with self._message_chains_lock:\n if request_id in self._message_chains:\n return self._message_chains[request_id]\n else:\n #logger.debug('no message chain found for request_id %s' %\n # request_id)\n #for rid, mc in self._message_chains.iteritems():\n # logger.debug(' %s - %s' % (rid, mc))\n return None", "def validate_chain():", "def get_chain(self):\n return self.chain", "def get_chain(self):\n return self.chain", "def trip_chain(self):\n pass", "def test_comicscreators_id_get(self):\n pass", "def get_chain_name (chain):\n if \"-\" in chain.id:\n id_chain=chain.id[-1]\n else:\n id_chain=chain.id\n return id_chain", "def test_workflows_id_exists_get(self):\n pass", "def getChain(self, chain):\n\n\t\tfor i in self.chain:\n\t\t\tif i.name == chain:\n\t\t\t\treturn i\n\n\t\treturn None", "def fetch_chain(self, certr, max_length=10):\n action = LOG_ACME_FETCH_CHAIN()\n with action.context():\n if certr.cert_chain_uri is None:\n return succeed([])\n elif max_length < 1:\n raise errors.ClientError('chain too long')\n return (\n DeferredContext(\n self._client.get(\n certr.cert_chain_uri,\n content_type=DER_CONTENT_TYPE,\n headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))\n .addCallback(self._parse_certificate)\n .addCallback(\n lambda issuer:\n self.fetch_chain(issuer, max_length=max_length - 1)\n .addCallback(lambda chain: [issuer] + chain))\n .addActionFinish())", "def test_workflows_find_one_get(self):\n pass", "def test_christiandoctrines_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/christiandoctrines/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_0_3_id_inc2(self):\n\n test = self.b1.id\n test2 = self.b2.id\n test3 = self.b3.id\n self.assertEqual(test, test2 - 1)\n self.assertEqual(test3, 22)", "def test_beneficiaries_retrieve_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve')\n response = self.client.get(url)\n self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)", "def test_workflows_id_team_get(self):\n pass", "def test_get_recipe_equipment_by_id(self):\n pass", "def diagnose_chain(chain):\n if chain[0] == 'all':\n dir = data.meta_dir_base()\n 
if os.path.exists(dir):\n for chain_id in os.listdir(dir):\n if utils.valid_chain_id(chain_id):\n diagnose_server(chain_id)\n else:\n consoler.info(' No published chain exist, do nothing.')\n else:\n for i in range(len(chain)):\n chain_get = chain[i].split(':')\n if len(chain_get) == 1:\n if utils.valid_chain_id(chain_get[0]):\n diagnose_server(chain_get[0])\n else:\n consoler.info(\n ' skip, invalid chain_id, chain_id is %s', chain_get[0])\n elif len(chain_get) == 2:\n if utils.valid_chain_id(chain_get[0]):\n if utils.valid_ip(chain_get[1]):\n ansible.diagnose_module(\n chain_get[1], ansible.get_dir() + '/' + chain_get[0])\n else:\n consoler.info(\n ' skip, invalid host, chain_id is %s, host is %s', chain_get[0], chain_get[1])\n else:\n consoler.info(\n ' skip, invalid chain_id, chain_id is %s, host is %s', chain_get[0], chain_get[1])\n else:\n consoler.info(\n ' skip, invalid format, not chain_id:host, input %s', chain_get)", "def get_object(id):", "def chain():\n chain_identifier, url = get_vars(request, [\"id\", \"data\"])\n info('chain=%s' % chain_identifier)\n chain = LAPPS_SERVICE_CHAINS.get_chain(chain_identifier)\n info('source-url=%s' % url)\n data = requests.get(url).text\n result = chain.run({\n \"discriminator\": \"http://vocab.lappsgrid.org/ns/media/text\", \n \"payload\": data})\n info(\"discriminator=%s\" % result.get('discriminator'))\n return render_template(\"chain.html\",\n chain=chain,\n fname=url,\n result=result,\n builder=HtmlBuilder())", "def test_settle_tx_known_chain(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=self.ledger_id,\n info=self.info,\n tx_nonce=\"Transaction nonce\",\n )\n\n with mock.patch.object(\n self.decision_maker.ledger_apis, \"transfer\", return_value=\"tx_digest\"\n ):\n tx_digest = self.decision_maker._settle_tx(tx_message)\n assert tx_digest == \"tx_digest\"", "def get_chain(self, **kwargs):\n self._check_if_fitted()\n return self._sampler.get_chain(**kwargs)", "def test_peers_peerid_get(self):\n pass", "def test_GET_receipt_by_id(self):\n\t\tself.POST_receipt()\n\t\t# verify receipt data matches list_data and that date set\n\t\tlist_data = self.GET_data('/api/list/search?_id=' + self.list_id + '&populate_rooms=true')[0]\n\t\treceipt_data = self.GET_data('/api/receipt/' + self.receipt_id)\n\n\t\tself.assertEqual(list_data['_id'], receipt_data['_list'])\n\t\tself.assertDataMatch(list_data, receipt_data, ['_cleaner', 'phonenumber', 'notes', 'price','location'])\n\n\t\tself.assertTrue('date' in receipt_data)\n\t\tself.assertTrue(dateutil.parser.parse(receipt_data['date']) > datetime.now())\n\n\t\t# for each room in list_data and receipt_data, assert they match\n\t\tself.assertEqual(len(list_data['rooms']), len(receipt_data['rooms']))\n\t\tnum_rooms = len(list_data['rooms'])\n\n\t\tfor r in range(num_rooms):\n\t\t\tself.assertEqual(list_data['rooms'][r]['name'], receipt_data['rooms'][r]['name'])\n\t\t\tself.assertEqual(len(list_data['rooms'][r]['tasks']), len(receipt_data['rooms'][r]['tasks']))\n\t\t\tfor t in range(len(list_data['rooms'][r]['tasks'])):\n\t\t\t\tself.assertEqual(list_data['rooms'][r]['tasks'][t], receipt_data['rooms'][r]['tasks'])\n\n\t\t# verify 
receipt.cleaner is filled in public cleaner\n\t\tcleaner_data = self.GET_data('/api/cleaner/' + receipt_data['_cleaner'])\n\t\tself.assertEqual(cleaner_data['name'], receipt_data['cleaner']['name'])\n\t\tself.assertEqual(cleaner_data['phonenumber'], receipt_data['cleaner']['phonenumber'])\n\t\tself.assertTrue('hashed_pwd' not in receipt_data['cleaner'])\n\n\t\t# delete receipt's parent list and assert receipt not deleted and receipt._list is null\n\t\tself.DELETE('/api/list/' + self.list_id)\n\t\treceipt_data = self.GET_data('/api/receipt/' + self.receipt_id)\n\t\tself.assertNotEqual(None, receipt_data)\n\t\tself.assertEqual(receipt_data['_list'], None)", "def test_getItineraryFromId(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + date['date'])\n invuid = '00000000000000000000000'\n\n rv = self.json_get('/getItineraryFromId/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': invuid})\n assert 'Itinerary not found' in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': uid})\n assert uid in str(rv.data)", "def test_chain(mocker):\n transaction = Transaction(\n chain=-1,\n nonce=4_294_967_295,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_CHAIN\n ):\n transaction.validate(raise_exception=True)\n\n transaction.chain = 15\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.chain = 257\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_CHAIN\n ):\n transaction.validate(raise_exception=True)", "def _get(isamAppliance, id):\n return isamAppliance.invoke_get(\"Retrieve a specific STS chain\", \"{0}/{1}\".format(uri, id),\n requires_modules=requires_modules,\n requires_version=requires_version)", "def test_austriansettlements_id_get(self):\n pass", "def test_brains_id_get(self):\n pass", "def test_get_specific_by_id(self):\n token = self.get_token()\n self.client.post('/api/v2/party', data=self.add_party,\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n response = self.client.get('/api/v2/party/1',\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json',\n )\n self.assertEqual(response.status_code, 200)", "async def test_txn_list_with_ids(self):\n paging = Mocks.make_paging_response(0, 2)\n transactions = Mocks.make_txns('0', '2')\n self.stream.preset_response(head_id='2', paging=paging, transactions=transactions)\n\n response = await self.get_assert_200('/transactions?id=0,2')\n controls = Mocks.make_paging_controls()\n self.stream.assert_valid_request_sent(transaction_ids=['0', '2'], paging=controls)\n\n self.assert_has_valid_head(response, '2')\n self.assert_has_valid_link(response, '/transactions?head=2&id=0,2')\n self.assert_has_valid_paging(response, paging)\n self.assert_has_valid_data_list(response, 2)\n self.assert_txns_well_formed(response['data'], '0', '2')", "def test_intercommunalitys_id_get(self):\n pass", "def test_get_payments_by_id(self):\n pass", "def test_user_id_identities_get(self):\n pass", "def 
test_get_recipe_price_breakdown_by_id(self):\n pass", "def test_basketballteams_id_get(self):\n pass", "def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self.step_actor(self.ipt)\n cb.assert_called_once_with(None)", "def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n for atm in self.iter_alt_loc():\n atm.chain_id = chain_id", "def test_racetracks_id_get(self):\n pass", "def test_poets_id_get(self):\n pass", "def test_get_related_nodes(self):\n pass", "def chain_cmd(ctx):\n pass", "def test_companies_company_id_data_bank_accounts_account_id_transactions_get(self):\n pass", "def test_post_chain_search(self):\n pass", "def test_required_deleted_chain_gets_stubbed(self):\n self.txn.store_delete(\"felix-b\")\n self.assertEqual(self.txn.affected_chains, set([\"felix-b\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([\"felix-b\"]))\n self.assertEqual(self.txn.chains_to_delete, set())\n self.assertEqual(self.txn.referenced_chains,\n set([\"felix-b\", \"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [],\n \"felix-c\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\", \"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"]),\n \"felix-stub\": set([\"felix-a\"])})", "def test_metrostations_id_get(self):\n pass", "def get(self, _id):", "async def test_retrieve_order_by_id(self):\n order = {\n 'id': '46871284',\n 'type': 'ORDER_TYPE_BUY_LIMIT',\n 'state': 'ORDER_STATE_PLACED',\n 'symbol': 'AUDNZD',\n 'magic': 123456,\n 'platform': 'mt5',\n 'time': '2020-04-20T08:38:58.270Z',\n 'openPrice': 1.03,\n 'currentPrice': 1.05206,\n 'volume': 0.01,\n 'currentVolume': 0.01,\n 'comment': 'COMMENT2'\n }\n client.get_order = AsyncMock(return_value=order)\n actual = await api.get_order('46871284')\n assert actual == order\n client.get_order.assert_called_with('accountId', '46871284')", "def test_check_duplication_entry_at_restoring_two_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n ref_entity_2 = Entity.objects.create(name=\"ReferredEntity2\", created_user=self._user)\n ref_entries_2 = [\n Entry.objects.create(name=\"ref2-%d\" % i, created_user=self._user, schema=ref_entity_2)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n attr_info_2 = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries_2[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries_2[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n for attr_name, info in attr_info_2.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n 
type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=ref_entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity_2)\n\n ref_entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in attr_info.items():\n attr = entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n ref_entries[0].complement_attrs(self._user)\n for attr_name, info in attr_info_2.items():\n attr = ref_entries[0].attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n # sync referral entries from database\n [x.refresh_from_db() for x in ref_entries]\n [x.refresh_from_db() for x in ref_entries_2]\n\n self.assertFalse(ref_entries_2[1].is_active)\n\n # create same name entry\n Entry.objects.create(name=\"ref2-1\", created_user=self._user, schema=ref_entity_2)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)", "def test_find_long_chains_multiple(self):\n # a -> b -> c -> ... x\n # \\________________/\n self.skill_graph = SkillGraph.load()\n old_skill = self.skill_graph.add(Skill.build('o', ''))\n last_skill = self.skill_graph.add(Skill.build('l', ''))\n self.skill_graph.add_prerequisite(last_skill.id, old_skill.id)\n chain_ids = [old_skill.id]\n for index in range(CHAINS_MIN_LENGTH):\n new_skill = self.skill_graph.add(Skill.build(str(index), ''))\n chain_ids.append(new_skill.id)\n self.skill_graph.add_prerequisite(new_skill.id, old_skill.id)\n old_skill = new_skill\n self.skill_graph.add_prerequisite(old_skill.id, last_skill.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains()\n self.assertEqual([chain_ids], result)", "def test_datatransformationsetups_id_get(self):\n pass", "def set_chain_id(self, chain_id):\n ## check for conflicting chain_id in the structure\n if self.model is not None:\n chk_chain = self.model.get_chain(chain_id)\n if chk_chain is not None or chk_chain != self:\n raise ChainOverwrite()\n\n Segment.set_chain_id(self, chain_id)\n\n ## resort the parent structure\n if self.model is not None:\n self.model.chain_list.sort()", "def test_beneficiaries_retrieve_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve', kwargs={'pk': 1})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def get_chain(self):\n return self.segment.chain", "def get_chain(self):\n return self.segment.chain", "def test_companies_company_id_data_journal_entries_journal_entry_id_get(self):\n pass", "def complex_recursion_regression_test(self):\n\n class User(Strongbox):\n ID = attr(int)\n username = attr(str)\n domains = linkset((lambda : Domain),\"user\")\n sites = linkset((lambda : Site),\"user\")\n class Domain(Strongbox):\n ID = attr(int)\n user = link(User)\n name = attr(str)\n site = link(lambda : Site) \n class Site(Strongbox):\n ID = attr(int)\n user = link(User)\n domain = link(Domain)\n dbMap = Schema({\n User:\"user\",\n Domain:\"domain\",\n Domain.user: \"userID\",\n Domain.site: \"siteID\",\n Site:\"site\",\n Site.user: \"userID\",\n Site.domain: \"domainID\",\n })\n\n clerk = Clerk(RamStorage(), dbMap)\n u = clerk.store(User(username=\"ftempy\"))\n 
u = clerk.match(User,username=\"ftempy\")[0]\n d = clerk.store(Domain(name=\"ftempy.com\", user=u))\n assert d.user, \"didn't follow link before fetch\"\n d = clerk.match(Domain, name=\"ftempy.com\")[0]\n\n # the bug was here: it only happened if User had .domains\n # I think because it was a linkset, and the linkset had\n # an injector. Fixed by inlining the injector test into\n # Clekr.store:\n assert d.user, \"didn't follow link after fetch\"\n assert d.user.ID == u.ID\n\n # ah, but then we had an infinite recursion problem\n # with site, but I fixed that with private.isDirty:\n d.site = clerk.store(Site(domain=d))\n d = clerk.store(d)\n assert d.site.domain.name == \"ftempy.com\"\n\n # and again here:\n d = clerk.fetch(Domain, 1)\n assert not d.private.isDirty\n assert not d.site.private.isDirty # this failed.\n clerk.store(d) # so this would recurse forever", "def test_installments_id_get(self):\n pass", "def chain(self):\n return ValueError(\"chain function not set.\")", "def test_companies_company_id_data_bill_credit_notes_bill_credit_note_id_get(self):\n pass", "def test_get_item_by_id(self):\n response = self.client.get('/api/v1/category/1',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 200)", "def test_sort_chain_multiple_reuse():\n data = [-10, 42, 8, 64, -6, 76, 48, 8, -30, 1, 11, 92, 37, 4]\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n id_record = {}\n walker = chain\n while walker is not None:\n id_record[id(walker)] = walker.data\n walker = walker.next\n\n result = A8.sort_chain(chain)\n\n walker = result\n while walker is not None:\n assert id(walker) in id_record, \"sort_chain created new node\"\n assert id_record[id(walker)] == walker.data, \"sort_chain moved data value {} to new node\".format(walker.data)\n walker = walker.next", "def test_cyclingleagues_id_get(self):\n pass", "def test_sort_chain_two_structure_3():\n chain = N.Node(2, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result is not None, \"sort_chain returned empty chain given input chain size 2 with dupicates\"\n assert result.next is not None, \"sort_chain returned singleton chain given input chain size 2 with dupicates\"\n assert result.next.next is None, \"sort_chain returned extended chain given input chain size 2 with dupicates\"", "def get_record(self, id: uplink.Path):\n pass", "def test_workflows_id_creator_get(self):\n pass", "def test_get_recipe_ingredients_by_id(self):\n pass", "def test_check_duplication_entry_at_restoring_one_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in 
attr_info.items():\n attr = entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n\n # create same name entry\n Entry.objects.create(name=\"ref-1\", created_user=self._user, schema=ref_entity)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)", "def test_find_by_id(session, id, has_results):\n party: MhrParty = MhrParty.find_by_id(id)\n if has_results:\n assert party\n assert party.id == 200000000\n assert party.address_id > 0\n assert party.party_type == MhrPartyTypes.SUBMITTING\n assert party.status_type == MhrOwnerStatusTypes.ACTIVE\n assert not party.first_name\n assert not party.middle_name\n assert not party.last_name\n assert party.business_name\n assert party.compressed_name\n assert party.registration_id\n assert party.change_registration_id\n assert party.email_id\n assert party.phone_number\n else:\n assert not party", "def testIdReturn(self):\n self.assertEqual(\n 'uniqueId',\n self.cc.id\n )", "async def test_txn_list_with_head_and_ids(self):\n paging = Mocks.make_paging_response(0, 1)\n self.stream.preset_response(\n head_id='1',\n paging=paging,\n transactions=Mocks.make_txns('0'))\n\n response = await self.get_assert_200('/transactions?id=0&head=1')\n controls = Mocks.make_paging_controls()\n self.stream.assert_valid_request_sent(\n head_id='1',\n transaction_ids=['0'],\n paging=controls)\n\n self.assert_has_valid_head(response, '1')\n self.assert_has_valid_link(response, '/transactions?head=1&id=0')\n self.assert_has_valid_paging(response, paging)\n self.assert_has_valid_data_list(response, 1)\n self.assert_txns_well_formed(response['data'], '0')", "def test_get_activities_from_recursive_contexts(self):\n from .mockers import context_query\n from .mockers import create_context\n from .mockers import subscribe_contextA, create_contextA, user_status_contextA\n from .mockers import subscribe_contextB, create_contextB, user_status_contextB\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context, permissions=dict(read='public', write='restricted', subscribe='restricted', invite='restricted'))\n self.create_context(create_contextA, permissions=dict(read='subscribed', write='subscribed', subscribe='restricted', invite='restricted'))\n self.create_context(create_contextB, permissions=dict(read='subscribed', write='subscribed', subscribe='restricted', invite='restricted'))\n self.admin_subscribe_user_to_context(username, subscribe_contextA)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_contextA)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_contextB)\n self.create_activity(username, user_status_contextA)\n self.create_activity(username_not_me, user_status_contextA)\n self.create_activity(username_not_me, user_status_contextB)\n\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username), status=200)\n result = json.loads(res.text)\n self.assertEqual(len(result), 2)\n self.assertEqual(result[0].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[0].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[0].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n self.assertEqual(result[1].get('actor', None).get('username'), 'messi')\n self.assertEqual(result[1].get('object', None).get('objectType', 
None), 'note')\n self.assertEqual(result[1].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username_not_me), status=200)\n result = json.loads(res.text)\n self.assertEqual(len(result), 3)\n self.assertEqual(result[0].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[0].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[0].get('contexts', None)[0]['url'], subscribe_contextB['object']['url'])\n self.assertEqual(result[1].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[1].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[1].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n self.assertEqual(result[2].get('actor', None).get('username'), 'messi')\n self.assertEqual(result[2].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[2].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])", "def test_get_company_props_by_company_id_using_get(self):\n pass", "def test_sort_chain_two_structure():\n chain = N.Node(1, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result is not None, \"sort_chain returned empty chain given input chain size 2 already in order\"\n assert result.next is not None, \"sort_chain returned singleton chain given input chain size 2 already in order\"\n assert result.next.next is None, \"sort_chain returned extended chain given input chain size 2 already in order\"", "def test_get_pipeline_by_id(self):\n response = self.client.get_pipeline_by_id(2)\n self.assertEqual(response['id'], 2)", "def testFetchBodyStructureUID(self):\n return self.test_fetchBodyStructure(1)", "def get_chain(self):\n return self.fragment.chain", "def test_companies_company_id_push_get(self):\n pass", "def test_companies_company_id_data_bank_accounts_account_id_get(self):\n pass", "def test_model_flow_node_model_flow_id_node_id_component_get(self):\n pass", "def test_0_2_id_inc(self):\n\n self.b1.id = 1\n test = self.b1.id\n test2 = self.b2.id\n self.assertEqual(test, test2 - 1)", "def testGetRelatedIdentifiers(self):\n try:\n # --- Get related identifiers ---\n pcdcP = PubChemDataCacheProvider(self.__cfgOb, self.__cachePath)\n rD = pcdcP.getRelatedMapping(self.__cidList)\n logger.info(\"rD %r\", rD)\n self.assertGreaterEqual(len(rD), len(self.__cidList))\n #\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def api_node_cascades(id):\n if id not in node_store:\n abort(404)\n if request.method == 'GET':\n without = {int(i) for i in request.args.get('without', '').split()}\n ids = [i for i in relation_store[id] if i not in without]\n data = []\n for id in ids:\n item = dict(node_store[id])\n item['id'] = id\n data.append(item)\n return make_response(json.dumps(data))", "def test_find_way_left(self):\n print ( \"id: \" + self.id())\n self.assertEqual(manhattan.findway(11), 2)", "def test_workflows_id_head(self):\n pass", "def test_sort_chain_two_structure_2():\n chain = N.Node(3, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result is not None, \"sort_chain returned empty chain given input chain size 2 in reverse order\"\n assert result.next is not None, \"sort_chain returned singleton chain given input chain size 2 in reverse order\"\n assert result.next.next is None, \"sort_chain returned extended chain given input chain size 2 in reverse order\"", "def test_get2(self):\n 
pass", "def test_getSiblingMissing(self):\n self.store.powerUp(self.contentStore1, ISiblingStore)\n objectId = u'sha256:NOSUCHOBJECT'\n d = self.contentStore2.getSiblingObject(objectId)\n return self.assertFailure(d, NonexistentObject\n ).addCallback(lambda e: self.assertEquals(e.objectId, objectId))", "def test_book_related(self):\n client = APIClient()\n client.login(username=self.students[0].username, password=\"salam*123\")\n response = client.get(\"/books/4/related/\")\n json = response.json()\n self.assertEqual(json[\"count\"], 2)\n self.assertEqual(json[\"results\"][0][\"id\"], 5)\n self.assertEqual(json[\"results\"][1][\"id\"], 2)" ]
[ "0.66562283", "0.6344267", "0.60350573", "0.6012869", "0.5983662", "0.5644152", "0.5638522", "0.56080955", "0.55986637", "0.5579138", "0.54949653", "0.5461469", "0.54595417", "0.5459411", "0.5446837", "0.5446837", "0.54265", "0.53988206", "0.5392831", "0.5390298", "0.5332432", "0.53292954", "0.53073096", "0.52932036", "0.5274561", "0.5259532", "0.52416277", "0.52276194", "0.5220462", "0.52119255", "0.5211398", "0.520057", "0.51988506", "0.5190481", "0.51826364", "0.51782906", "0.51729417", "0.5171109", "0.51688224", "0.51586866", "0.5152207", "0.5148064", "0.51438564", "0.5135739", "0.5114485", "0.5098713", "0.5095193", "0.50776076", "0.5065922", "0.5065259", "0.5064815", "0.50643516", "0.50624996", "0.50619483", "0.5060078", "0.50553477", "0.5037217", "0.503559", "0.5031928", "0.50231695", "0.5017575", "0.5017446", "0.5011716", "0.50075084", "0.5000902", "0.5000902", "0.49983236", "0.4997888", "0.49945307", "0.49904937", "0.49887353", "0.498828", "0.49873665", "0.49841985", "0.49808502", "0.49806803", "0.49792373", "0.49754107", "0.4950563", "0.4950449", "0.49423304", "0.49408093", "0.49403295", "0.4940024", "0.49338496", "0.49248314", "0.49247092", "0.49242225", "0.4916972", "0.49153978", "0.49026963", "0.48995146", "0.4896329", "0.48954257", "0.48950902", "0.48943287", "0.4889881", "0.48803934", "0.48724365", "0.48698977" ]
0.93600637
0
Test case for get_chains
def test_get_chains(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_chain_by_id(self):\n pass", "def get_chains (structure):\n chains=[]\n for chain in structure[0]:\n chains.append(chain)\n return chains", "def iter_chains(self):\n if self.default_model:\n return iter(self.default_model.chain_list)\n return iter(list())", "def iter_chains(self):\n return iter(self.chain_list)", "def determine_chains(self, c):\n\n if isinstance(c, str):\n c = [c]\n\n chains = [None for _ in c]\n for k in self.monomer.chains.keys():\n for i, x in enumerate(c):\n if x in self.monomer.chains[k].keys():\n chains[i] = k\n\n return chains", "def sample_chains():\n c = chain(add.s(1, 1), add.s(1), add.s(1))\n res = c()\n print(res.get())\n print(res.parent.get())\n print(res.parent.parent.get())", "def test_acyclic_chains():\n names = ['robot', 'box1', 'box2']\n chains = lambda: FactoredRandomGeometricGraph.acyclic_chains(names)\n\n expected_number = 16\n actual_number = sum(1 for _ in chains())\n assert actual_number == expected_number, \\\n \"Expected {} chains; actual value was {}\".format(\n expected_number, actual_number)\n\n assert all(\n FactoredRandomGeometricGraph.is_acyclic(chain)\n for chain in chains())", "def _test_chain(self, x, class_type_list, kwargs_list, y=None):\n chain, modules = self._create_chain(class_type_list, kwargs_list)\n\n chain = chain.fit(x, y=y)\n self.logger.info(\"Preprocessors chain:\\n{:}\".format(chain))\n\n x_chain = chain.forward(x)\n self.logger.info(\"Trasformed X (chain):\\n{:}\".format(x_chain))\n\n # Train the manual chain and transform\n x_manual = x\n for module in modules:\n module.fit(x_manual, y=y)\n x_manual = module.forward(x_manual)\n\n self.logger.info(\"Trasformed X (manual):\\n{:}\".format(x_manual))\n self.assert_allclose(x_chain, x_manual)\n\n return x_chain", "def test_find_long_chains_multiple(self):\n # a -> b -> c -> ... 
x\n # \\________________/\n self.skill_graph = SkillGraph.load()\n old_skill = self.skill_graph.add(Skill.build('o', ''))\n last_skill = self.skill_graph.add(Skill.build('l', ''))\n self.skill_graph.add_prerequisite(last_skill.id, old_skill.id)\n chain_ids = [old_skill.id]\n for index in range(CHAINS_MIN_LENGTH):\n new_skill = self.skill_graph.add(Skill.build(str(index), ''))\n chain_ids.append(new_skill.id)\n self.skill_graph.add_prerequisite(new_skill.id, old_skill.id)\n old_skill = new_skill\n self.skill_graph.add_prerequisite(old_skill.id, last_skill.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains()\n self.assertEqual([chain_ids], result)", "def test_find_long_chains(self):\n # a --> d --> j g h --> i\n # b _/ c --> e --> f\n self._build_sample_graph()\n # Adding singleton\n sg = self.skill_graph.add(Skill.build('g', ''))\n # Adding short path\n sh = self.skill_graph.add(Skill.build('h', ''))\n si = self.skill_graph.add(Skill.build('i', ''))\n self.skill_graph.add_prerequisite(si.id, sh.id)\n # Making path longer\n sj = self.skill_graph.add(Skill.build('j', ''))\n self.skill_graph.add_prerequisite(sj.id, self.sd.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains(2)\n expected = [\n [self.sa.id, self.sd.id, sj.id],\n [self.sb.id, self.sd.id, sj.id],\n [self.sc.id, self.se.id, self.sf.id]\n ]\n self.assertEqual(sorted(expected), sorted(result))", "def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self.step_actor(self.ipt)\n cb.assert_called_once_with(None)", "def get_all_chains() -> List[ChainInfo]:\n return list(registry.chain_dict.values())", "def chain():\n return eth_tester.EthereumTester(eth_tester.PyEVMBackend())", "def f_chains(self) -> List[Callable[[], Chain]]:\n return [delayed_run_chain() for _ in range(self.n_chains)]", "def test_sort_chain_multiple_structure_random():\n data = [-10, 42, 8, 64, -6, 76, 48, 8, -30, 1, 11, 92, 37, 4]\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n for i in range(len(data)):\n assert walker is not None, \"sort_chain returned chain of length {} given chain with randomish values\".format(i)\n walker = walker.next\n\n assert walker is None, \"sort_chain returned chain longer than length {} given chain with randomish values\".format(len(data))", "def iter_all_chains(self):\n for model in self.model_list:\n for chain in model.chain_list:\n yield chain", "def test_rewrite_chains_stub(self):\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n )\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"]})", "def make_chains(text_string, n):\n\n chains = {}\n\n # your code goes here\n words = text_string.split()\n #print words\n \n for i in range(len(words) - n):\n # next_word = words[i+2]\n #print \"Next Word\",next_word\n key_word_tuple = tuple(words[i:(i + n)])\n # print key_word_tuple\n #, words[i + 1])\n chains[key_word_tuple] = chains.get(key_word_tuple, [])\n # if (i + n) == (len(words) - 1):\n # next_word = words[0]\n # print \"i\", i\n # print \"BLINGGGG\"\n # print \"End of Range \",next_word, \"key word tuple \", key_word_tuple\n # # if (i + 2) < len(words):\n # else: \n next_word = 
words[i+n]\n # print next_word\n\n chains[key_word_tuple].append(next_word)\n \n\n \n \n \n # print chains[\"I\",\"am\"] \n # print chains\n return chains", "def genChains(self):\n self.numMonomer = 0\n self.numBonds = 0\n self.numMols = 0\n self.numCations = 0\n self.numAnions = 0\n\n self.atomsCoords = []\n self.atomsType = []\n self.atomsCharge = []\n self.molId = []\n self.bondList = []\n \n for i in range(self.numPa + self.numPc):\n\n if i < self.numPc:\n # polycation chains, charge in LJ units of LAMMPS\n # electron charge would be 10.54 using bare LAMMPS LJ units\n # the dielectric constans of solvent is effectively taken as 111 when assign 1 to +e\n # just need to set dielectric as 0.72 in LAMMPS ot mimic water with dielectric constant 80\n self.beadCharge = 1\n self.beadType = 1 # atomic type for neutral beads in polycation chains\n self.chain = self.lenPc\n else:\n self.beadCharge = -1 # polyanion chains\n self.beadType = 3 # atomic type for neutral beads in polyanion chains\n self.chain = self.lenPa\n\n self.numMols += 1\n\n # generate the first bead of each chain randomly\n self.numMonomer += 1\n self.cxyz = np.random.rand(3) * self.box + self.lxyz\n\n self.atomsCoords.append(self.cxyz)\n #self.atomsType.append(self.beadType)\n\n # decide if the first bead is charged or not\n if self.chargeRepeat == 1:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsType.append(self.beadType)\n self.atomsCharge.append(0)\n\n self.molId.append(self.numMols)\n\n self.currpxyz = self.cxyz\n\n # follow random walk to generate the chain\n # generate the seconb bead of the chain\n self.theta, self.phi = np.random.rand(2) * np.array([np.pi, 2 * np.pi])\n self.ds = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n # decide if the second bead is charged or not\n if 2%self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n self.molId.append(self.numMols)\n \n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n self.currpxyz = self.cxyz\n\n self.currtheta = self.theta\n self.currphi = self.phi\n\n self.dstot += self.ds\n\n # generating the rest beads of the chain\n\n for k in range(3, self.chain+1):\n # only accept atoms that are beyong certain distance\n # from the atom precding the current atom in the chain\n self.theta, self.phi = np.random.rand() * np.array([np.pi - self.stiffangle, \\\n 2 * np.pi])\n self.ds1 = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.reverseXZrotation()\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n if k % self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n self.molId.append(self.numMols)\n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n 
self.currpxyz = self.cxyz\n\n self.currtheta = np.arccos(self.ds[0]/self.segment)\n if self.ds[2] > 0:\n self.currphi = np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n else:\n self.currphi = 2*np.pi - np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n\n self.dstot += self.ds\n\n print \"%d beads are generated.\\n\" % self.numMonomer \n assert self.numMonomer == self.numPc * self.lenPc + self.numPa * self.lenPa, \\\n \"The number of monomers in chains is wrong!\\n\"\n assert self.numCations == int(np.floor(self.lenPc * self.chargeFraction)*self.numPc), \\\n \"The number of positively charged beads is wrong!\\n\"\n assert self.numAnions == int(np.floor(self.lenPa * self.chargeFraction)*self.numPa), \\\n \"The number of negatively charged beads is wrong!\\n\"", "def test_sort_chain_multiple_structure_increasing():\n n = 11\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(n-item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n for i in range(n):\n assert walker is not None, \"sort_chain returned chain of length {} given chain with values increasing\".format(i)\n walker = walker.next\n\n assert walker is None, \"sort_chain returned chain longer than length {} given chain with values increasing\".format(n)", "def test_required_deleted_chain_gets_stubbed(self):\n self.txn.store_delete(\"felix-b\")\n self.assertEqual(self.txn.affected_chains, set([\"felix-b\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([\"felix-b\"]))\n self.assertEqual(self.txn.chains_to_delete, set())\n self.assertEqual(self.txn.referenced_chains,\n set([\"felix-b\", \"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [],\n \"felix-c\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\", \"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"]),\n \"felix-stub\": set([\"felix-a\"])})", "def numChains(self):\n\n\t\treturn len(self.chain)", "def count_chains(self):\n return len(self.chain_list)", "def chains(self, model_num = 0):\n return [c for c in self.struct]", "def test_markow_chain():\n amount = len(markow_chain(SNULL, TIMESTEPS, PROBABILITYMATRIX))\n assert TIMESTEPS == amount", "def test_chain(self):\n self._test_chain(self.array_dense,\n ['min-max', 'pca', 'min-max', 'rbf', 'svm'],\n [{'feature_range': (-5, 5)}, {},\n {'feature_range': (0, 1)}, {}, {}],\n y=self.labels)", "def make_chains(text_string):\n\n chains = {}\n words = text_string.split()\n\n for i in range(len(words) - 2):\n key = (words[i], words[i + 1])\n value = words[i + 2]\n #print key, value\n\n if key not in chains:\n chains[key] = []\n chains[key].append(value)\n\n # print chains\n return chains", "def make_chains(input_text, n):\n\n # contents = open_and_read_file(sys.argv[1])\n\n chains = {}\n\n words = input_text.split()\n\n for i in range(len(words) - 2):\n a, b = words[i], words[i+1]\n pair = (a, b,)\n\n if pair in chains:\n chains[pair] += [words[i+2]]\n else:\n chains[pair] = [words[i+2]]\n # if chains.get(pair, False):\n # c = words[i + 2]\n # chains[pair].append(c)\n # # how can we have an empty list as a value and not reset?\n # else:\n # c = words[i + 2]\n # chains[pair] = []\n # chains[pair].append(c)\n\n # print \"C equals: \", c\n # chains[pair].append(c)\n # else add \"\" to dictionary\n return chains", "def test_post_chain_search(self):\n pass", "def decimateChains(chains, max_err = 200):\n newchains = []\n for chain in chains:\n vs = chain - chain[0]\n angles = 
np.arctan2(vs[:,1], vs[:,0])\n vas = angles - angles[-1]\n ds = np.linalg.norm(vs, axis=1)\n errs = np.abs(np.sin(vas) * ds)\n id_far = np.argmax(errs)\n if errs[id_far] > max_err:\n newchains += decimateChains([chain[:id_far+1], chain[id_far:]], max_err)\n else:\n newchains.append(chain)\n return newchains", "def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsA\n\n\treturn allChains", "def __get_chains(self, name, data_keys, filters, x, y, views, orientation,\r\n post_process, select):\r\n if orientation == 'y':\r\n return [self.get_chain(name, data_keys, filters, x, y_var, views,\r\n post_process, select)\r\n for y_var in y]\r\n elif orientation == 'x':\r\n return [self.get_chain(name, data_keys, filters, x_var, y, views,\r\n post_process, select)\r\n for x_var in x]\r\n else:\r\n raise ValueError(\"Unknown orientation type. 
Please use 'x' or 'y'.\")", "def get_chain(self):\n return self.chain", "def get_chain(self):\n return self.chain", "def makeRandomChains( nChains=1 ):\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\t\n\t# array to hold finished, random chains\n\tallChains = []\n\t\n\tfull = False\t\n\twhile( not full ):\n\t\t# array to hold the currently constructed chain\n\t\tnewChain = []\n\t\t\n\t\t# start the chain with a connected TRAP+AT\n\t\taddComponent(newChain,types[0],0,0)\n\t\taddComponent(newChain,types[1],1,1)\n\t\tconnectComponents(newChain,0,randint(0,types[0]['nSites']-1),1,randint(0,types[1]['nSites']-1))\n\n\t\tnTRAPs = 1\n\t\tnATs = 1\n\t\n\t\twhile( (nTRAPs < types[0]['max']) or (nATs < types[1]['max']) ):\n\t\t\t\n\t\t\tindex = nTRAPs + nATs\n\t\t\t\n\t\t\tif( (random() < 0.5) and (nTRAPs < types[0]['max']) ):\n\t\t\t\tlist = makeSiteList(newChain,types[0],0)\n\t\t\t\tif(len(list) > 0):\n\t\t\t\t\tsite = choice(list)\n\t\t\t\t\taddComponent(newChain,types[0],index,index)\n\t\t\t\t\tconnectComponents(newChain,site[0],site[1],index,randint(0,types[0]['nSites']-1))\n\t\t\t\t\tnTRAPs += 1\n\t\t\telif(nATs < types[1]['max']):\n\t\t\t\tlist = makeSiteList(newChain,types[1],0)\n\t\t\t\tif(len(list) > 0):\n\t\t\t\t\tsite = choice(list)\n\t\t\t\t\taddComponent(newChain,types[1],index,index)\n\t\t\t\t\tconnectComponents(newChain,site[0],site[1],index,randint(0,types[1]['nSites']-1))\n\t\t\t\t\tnATs += 1\n\t\t\t\t\t\t\t\n\t\tallChains.append( Copy(newChain) )\n\t\t\n\t\tif( len(allChains) == nChains ):\n\t\t\treturn allChains\n\tpass", "def test_sort_chain_multiple_structure_decreasing():\n n = 14\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n for i in range(n):\n assert walker is not None, \"sort_chain returned chain of length {} given chain with values decreasing\".format(i)\n walker = walker.next\n\n assert walker is None, \"sort_chain returned chain longer than length {} given chain with values decreasing\".format(n)", "def cert_chains(self) -> 'outputs.CertificateChainsResponse':\n return pulumi.get(self, \"cert_chains\")", "def make_chains(corpus):\n \n# This section reads the corpus file in to the program and splits the words in a words list. \n text = corpus.read() #this is functionally the same as the more complicated stuff below\n words = text.split()\n\n chain_dict = {}\n \"\"\"\n for line in f:\n line = line.rstrip()\n words.extend(line.split())\n \"\"\" \n#This looks at each word entry in the list that we made from original text - if the tuple does not yet exist,\n#it appends it to the dictionary as a key. 
If it does exist, it appends the value to the existing key.\n\n for each_number in range(len(words)-2): \n if (words[each_number], words[each_number+1]) not in chain_dict.keys():\n chain_dict[(words[each_number], words[each_number+1])] = [words[each_number+2]]\n else:\n chain_dict[(words[each_number], words[each_number+1])].append(words[each_number+2])\n \n return chain_dict", "def make_chains(text_string, n):\n chains = {}\n\n gettysburg = open_and_read_file()\n word_list = gettysburg.split()\n\n #iterating through length of word_list\n for item in range(0, len(word_list) - n):\n key_list = []\n #spefical case, last items\n # if item == len(word_list) - 3:\n # chains[(word_list[item + 1], word_list[item + 2])] = [None]\n #iterating to find tuple_keys until n\n for count in range(item, n + item):\n\n key_list.append(word_list[count])\n #print key_list\n tuple_key = tuple(key_list)\n\n #print tuple_key\n\n if tuple_key not in chains:\n\n chains[tuple_key] = [word_list[item + n]]\n else:\n chains[tuple_key].append(word_list[item + n])\n\n # print word_list\n\n return chains\n\n # your code goes here\n\n #print chains", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def count_chains(self):\n if self.default_model:\n return self.default_model.count_chains()\n return 0", "def test_sort_chain_two_structure():\n chain = N.Node(1, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result is not None, \"sort_chain returned empty chain given input chain size 2 already in order\"\n assert result.next is not None, \"sort_chain returned singleton chain given input chain size 2 already in order\"\n assert result.next.next is None, \"sort_chain returned extended chain given input chain size 2 already in order\"", "def validate_chain():", "def extract_all_chains(seq_section_gen):\n raw_chains = list(seq_section_gen)\n\n chain_isolation_regex = re.compile(r'^\\w+\\s+\\d+\\s+(.*)')\n\n mixed_chains = [\n re.search(chain_isolation_regex,\n raw_chain).group(1).strip() # remove whitespace\n for raw_chain in raw_chains\n ]\n\n mixed_chains = extract_mixed_chains(raw_chains)\n\n x = re.compile(r'^\\w+\\s+\\d+\\s+(.*)')\n\n # Create a dict of empty lists\n init = dict((chain, '') for chain in chain_set(mixed_chains))\n\n [\n init.update(\n {i[0]: init[i[0]] + ' ' + re.search(x, i).group(1).strip()})\n for i in mixed_chains\n ]\n\n return init", "def test_sort_chain_two_structure_3():\n chain = N.Node(2, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result is not None, \"sort_chain returned empty chain given input chain size 2 with dupicates\"\n assert result.next is not None, \"sort_chain returned singleton chain given input chain size 2 with dupicates\"\n assert result.next.next is None, \"sort_chain returned extended chain given input chain size 2 with dupicates\"", "def 
captured_chains(vfile, efile):\n # Graph class convertes vfile and efile into a graph\n graph = Graph(vfile, efile)\n\n # visited list is global so it can be accessed through any of the recursive calls\n global visited\n visited = [0]*len(graph.vertex_properties)\n res = []\n\n # runs on every vertex which ensures that no vertex is skipped\n for vertex_index in range(len(visited)):\n # vertices may have already been visited due to being part of the DFS so the visited list ensures no vertex is\n # visited more than once\n if not visited[vertex_index]:\n # current_chain and captured_chain_check is global so that it can be accessed through the recursive calls\n global current_chain\n current_chain = []\n global captured_chain_check\n\n # calls the queue class\n captured_chain_check = deque()\n\n DFS(graph, vertex_index)\n if len(current_chain) != 0 and len(captured_chain_check) == 0:\n res.append(current_chain)\n captured_chain_check.clear()\n return res", "def wire_chains(self):\n allChains = self.instances.getAllChainInstances()\n for chain in allChains:\n logging.debug(\"%s\", chain)\n allChains[chain].setup_event_path()", "def chain(self):\n return ValueError(\"chain function not set.\")", "def fetch_chain(self, certr, max_length=10):\n action = LOG_ACME_FETCH_CHAIN()\n with action.context():\n if certr.cert_chain_uri is None:\n return succeed([])\n elif max_length < 1:\n raise errors.ClientError('chain too long')\n return (\n DeferredContext(\n self._client.get(\n certr.cert_chain_uri,\n content_type=DER_CONTENT_TYPE,\n headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))\n .addCallback(self._parse_certificate)\n .addCallback(\n lambda issuer:\n self.fetch_chain(issuer, max_length=max_length - 1)\n .addCallback(lambda chain: [issuer] + chain))\n .addActionFinish())", "def get_chains_list(mmtf_dict, groups):\n\n chains = []\n for i_id, id, group_num in zip(mmtf_dict[\"chainIdList\"],\n mmtf_dict[\"chainNameList\"], mmtf_dict[\"groupsPerChain\"]):\n chain = {\"id\": id, \"internal_id\": i_id, \"groups\": groups[:group_num]}\n del groups[:group_num]\n for entity in mmtf_dict[\"entityList\"]:\n if len(chains) in entity[\"chainIndexList\"]:\n chain[\"type\"] = entity[\"type\"]\n chain[\"sequence\"] = entity.get(\"sequence\", \"\")\n chain[\"full_name\"] = entity.get(\"description\", None)\n break\n chains.append(chain)\n return chains", "def test_sort_chain_two_structure_2():\n chain = N.Node(3, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result is not None, \"sort_chain returned empty chain given input chain size 2 in reverse order\"\n assert result.next is not None, \"sort_chain returned singleton chain given input chain size 2 in reverse order\"\n assert result.next.next is None, \"sort_chain returned extended chain given input chain size 2 in reverse order\"", "def trip_chain(self):\n pass", "def make_chains(text_string):\n\n chains = {}\n text_list = text_string.split()\n\n index = 0\n while index < (len(text_list) - 2):\n\n # create a variable to hold the current chain key\n chain_key = (text_list[index], text_list[index+1])\n # create a variable to hold the dictionary value\n new_value = text_list[index+2]\n\n if chain_key not in chains:\n chains[chain_key] = []\n\n chains[chain_key].append(new_value)\n\n index = index + 1\n # your code goes here\n\n return chains", "def make_chains(text_string, n):\n\n chains = {}\n\n # your code goes here\n words = text_string.split()\n words.append(None)\n \n for i in range(0, len(words)-n):\n t = tuple(words[i:n+i])\n chains[t] = 
chains.get(t, []) + [words[i+n]]\n \n return chains", "def detect_chains(input_pdb):\n opened_pdb = open(input_pdb, \"r\").readlines()\n chains = []\n for value in opened_pdb[1:]:\n try:\n if value[21] not in chains:\n chains.append(value[21])\n except:\n continue\n return chains", "def get_chain(self, **kwargs):\n self._check_if_fitted()\n return self._sampler.get_chain(**kwargs)", "def make_chains(text_string):\n\n chains = {}\n #create an empty dictionary\n words = text_string.split()\n #make text into string and then splitting it\n \n for i in range(len(words)-2):\n #for every word in string except for last two words\n bi_gram = (words[i], words[i + 1])\n #created key variable into tuple using two adjacent words\n if bi_gram in chains:\n # if the key is in the dictionary add the next word into list of values/words\n chains[bi_gram].append(words[i+2])\n else:\n # if word is not already in dictionary, create the list where word is placed\n chains[bi_gram] = [words[i+2]]\n #defining variable for last key/tuple \n last_bi_gram = (words[-2],words[-1])\n #if it is the last bi_gram in dictionary, add None as a value in list of words else add none/ nothing\n if last_bi_gram in chains:\n chains[last_bi_gram].append(None)\n else:\n chains[last_bi_gram] = [None]\n\n return chains", "def make_chains(word_list):\n\n chains = {}\n for index in range(0, len(word_list) - 2):\n # only making small chains because I like maximum absurdity\n key = tuple(word_list[index:index + 2])\n if key not in chains:\n chains[key] = [word_list[index + 2]]\n else:\n chains[key].append(word_list[index + 2])\n return chains", "def get_n_chains(self): \n res_id_cnt = 0\n tot_n_res = len(self.res_ids)\n n_chns = 0\n for res_id in self.res_ids:\n res_chn_i = res_id[2]\n if res_id_cnt > 1:\n if res_chn_i == self.res_ids[res_id_cnt-1][2]:\n pass\n else:\n n_chns+=1\n res_id_cnt+=1\n return n_chns", "def metro_alg(N):\n\n chain = []\n chain_removed = []\n chain.append(0)\n chain_removed.append(0)\n\n for i in range(N):\n j = 0\n y = (np.random.rand()-0.5)*10\n if next_chain_link(chain[i], y):\n chain.append(y)\n else:\n chain.append(chain[i])\n\n if next_chain_link(chain_removed[j], y):\n chain_removed.append(y)\n j += 1\n\n return chain, chain_removed", "def parse_chains(data, tool_names):\n triplets = {}\n count = 0\n total = len(data)\n for chain in data:\n print >> sys.stderr, (\"\\rParsing chain %06i of %06i\") % (count, total),\n count += 1\n # create a dataset index array for first column of chain\n index = numpy.r_[0, numpy.bincount(chain[:, 0])]\n for i in range(1, index.shape[0]):\n index[i] += index[i - 1]\n # for each entry in the chain, follow it forward 2 steps if possible to determine tool triplets\n for i in range(chain.shape[0]):\n find_triplet(triplets, chain, index, i, True, chain[i, 2])\n # filter out empty entries and convert counts into percentages\n filtered_triplets = {}\n for triplet, values in triplets.iteritems():\n # remove empty entries\n if len(values) == 0:\n continue\n # find total count\n total = float(sum(values.values()))\n # convert to percentages\n tool_values = {}\n for tool, value in values.iteritems():\n tool_values[str(tool)] = value / total\n # convert tool indices to names\n new_key = \"%i,%i\" % (triplet[0], triplet[1])\n filtered_triplets[new_key] = tool_values\n print >> sys.stderr, (\"\\rFinished parsing chains into triplets \\n\"),\n return filtered_triplets", "def make_chains(markov_chains, clean_words):\n\n for i in range(len(clean_words)-2):\n\n tuple_words = (clean_words[i], 
clean_words[i+1])\n value = clean_words[i+2]\n\n if tuple_words not in markov_chains:\n markov_chains[tuple_words] = [value]\n else:\n markov_chains[tuple_words].append(value)\n\n return markov_chains", "def test_post_chain(self):\n pass", "def chain_cmd(ctx):\n pass", "def test_chain(mocker):\n transaction = Transaction(\n chain=-1,\n nonce=4_294_967_295,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_CHAIN\n ):\n transaction.validate(raise_exception=True)\n\n transaction.chain = 15\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.chain = 257\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_CHAIN\n ):\n transaction.validate(raise_exception=True)", "def test_cons(self):", "def test_sort_chain_multiple_content_increasing():\n n = 11\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(n-item-1, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n prev = None\n seen = [False]*n\n for i in range(n):\n assert walker.data in data, \"sort_chain created extraneous data {} given chain with values increasing\".format(walker.data)\n seen[walker.data] = True\n if prev is not None:\n assert prev.data <= walker.data, \"sort_chain placed {} before {} given chain with values increasing\".format(prev.data, walker.data)\n prev = walker\n walker = walker.next\n\n for i,b in enumerate(seen):\n assert b, \"sort_chain omitted data value {} from returned chain given chain with values increasing\".format(i)", "def test_sort_chain_multiple_content_random():\n data = [-10, 42, 8, 64, -6, 76, 48, 8, -30, 1, 11, 92, 37, 4]\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n prev = None\n seen = {}\n for v in data:\n seen[v] = False\n for i in range(len(data)):\n assert walker.data in data, \"sort_chain created extraneous data {} given chain with with randomish values\".format(walker.data)\n seen[walker.data] = True\n if prev is not None:\n assert prev.data <= walker.data, \"sort_chain placed {} before {} given chain with with randomish values\".format(prev.data, walker.data)\n prev = walker\n walker = walker.next\n\n for k in seen:\n assert seen[k], \"sort_chain omitted data value {} from returned chain given chain with with randomish values\".format(k)", "def test_markov_word_chain_builder():\n input_sequence = (\n 'PIKARD', 'Q', 'PIKARD', 'DORF', 'PIKARD', 'Q', 'ROKER', 'Q', 'PIKARD'\n )\n expected_probabilities = {\n 'PIKARD': [('Q', 2.0 / 3), ('DORF', 1.0 / 3)],\n 'Q': [('PIKARD', 2.0 / 3), ('ROKER', 1.0 / 3)],\n 'DORF': [('PIKARD', 1.0)],\n 'ROKER': [('Q', 1.0)],\n }\n builder = markov.WordChainBuilder()\n for word in input_sequence:\n builder.add_next(word)\n chain = builder.normalize()\n\n for leader, probs in expected_probabilities.items():\n assert leader in chain\n assert sort_probs(probs) == sort_probs(chain[leader])", "def test_delete_required_chain_stub(self):\n # Exit the graceful restart period, during which we do not stub out\n # chains.\n self.ipt.cleanup(async=True)\n # Install a couple of chains. 
foo depends on bar.\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"],\n \"bar\": [\"--append bar --jump ACCEPT\"]},\n {\"foo\": set([\"bar\"]),\n \"bar\": set()},\n async=True,\n )\n self.step_actor(self.ipt)\n # Both chains should be programmed as normal.\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [\"--append bar --jump ACCEPT\"] })\n\n # Deleting bar should stub it out instead.\n self.ipt.delete_chains([\"bar\"], async=True)\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"] })", "def diagnose_chain(chain):\n if chain[0] == 'all':\n dir = data.meta_dir_base()\n if os.path.exists(dir):\n for chain_id in os.listdir(dir):\n if utils.valid_chain_id(chain_id):\n diagnose_server(chain_id)\n else:\n consoler.info(' No published chain exist, do nothing.')\n else:\n for i in range(len(chain)):\n chain_get = chain[i].split(':')\n if len(chain_get) == 1:\n if utils.valid_chain_id(chain_get[0]):\n diagnose_server(chain_get[0])\n else:\n consoler.info(\n ' skip, invalid chain_id, chain_id is %s', chain_get[0])\n elif len(chain_get) == 2:\n if utils.valid_chain_id(chain_get[0]):\n if utils.valid_ip(chain_get[1]):\n ansible.diagnose_module(\n chain_get[1], ansible.get_dir() + '/' + chain_get[0])\n else:\n consoler.info(\n ' skip, invalid host, chain_id is %s, host is %s', chain_get[0], chain_get[1])\n else:\n consoler.info(\n ' skip, invalid chain_id, chain_id is %s, host is %s', chain_get[0], chain_get[1])\n else:\n consoler.info(\n ' skip, invalid format, not chain_id:host, input %s', chain_get)", "def make_chains(self, input_text):\n\n chains = {}\n\n words = input_text.split()\n\n for i in range(len(words) - 2):\n key = (words[i], words[i + 1])\n value = words[i + 2]\n\n if key not in chains:\n chains[key] = []\n\n chains[key].append(value)\n\n return chains", "def make_chains(text_string, n):\n \n chains = {}\n \n text_string_list = text_string.split()\n\n\n # Make a tuple of two adjecnt words\n for i in range(len(text_string_list)-n):\n chain_key = []\n for num in range(n):\n chain_key= text_string_list[i:i+n]\n key_tuple = tuple(chain_key)\n #print(key_tuple)\n value = text_string_list[i+n]\n #print(value)\n if key_tuple in chains:\n chains[key_tuple].append(value)\n else:\n chains[key_tuple] = [value]\n\n print(chains)", "async def b_chain() -> dict:\n authority_chain = await chain.consensus()\n return {\"chain\": authority_chain[\"chain\"]}", "def test_get_learners(self):\n pass", "def test():\n from chains.sources import packet_streamer\n from chains.links import packet_meta\n from chains.links import reverse_dns\n from chains.utils import file_utils\n\n # Create a PacketStreamer and set its output to PacketSummary input\n data_path = file_utils.relative_dir(__file__, '../../data/http.pcap')\n\n streamer = packet_streamer.PacketStreamer(iface_name=data_path, max_packets=50)\n meta = packet_meta.PacketMeta()\n rdns = reverse_dns.ReverseDNS()\n printer = PacketSummary()\n\n # Set up the chain\n meta.link(streamer)\n rdns.link(meta)\n printer.link(rdns)\n\n # Pull the chain\n printer.pull()", "def test_cards_get_list(self):\n pass", "def chain():\n chain_identifier, url = get_vars(request, [\"id\", \"data\"])\n info('chain=%s' % chain_identifier)\n chain = LAPPS_SERVICE_CHAINS.get_chain(chain_identifier)\n info('source-url=%s' % url)\n data = requests.get(url).text\n result = chain.run({\n \"discriminator\": 
\"http://vocab.lappsgrid.org/ns/media/text\", \n \"payload\": data})\n info(\"discriminator=%s\" % result.get('discriminator'))\n return render_template(\"chain.html\",\n chain=chain,\n fname=url,\n result=result,\n builder=HtmlBuilder())", "def test_sort_chain_multiple_reuse():\n data = [-10, 42, 8, 64, -6, 76, 48, 8, -30, 1, 11, 92, 37, 4]\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n id_record = {}\n walker = chain\n while walker is not None:\n id_record[id(walker)] = walker.data\n walker = walker.next\n\n result = A8.sort_chain(chain)\n\n walker = result\n while walker is not None:\n assert id(walker) in id_record, \"sort_chain created new node\"\n assert id_record[id(walker)] == walker.data, \"sort_chain moved data value {} to new node\".format(walker.data)\n walker = walker.next", "def test_get_list(self):\n pass", "def make_chains(text_string, n):\n\n chains = {}\n # split() defaults to splitting at white space\n words = text_string.split()\n\n # using range turns it into a list of indices to loop through\n # rather than looping over the words, allowing you to access\n # + 1 or +2 for example\n for i in range(len(words)):\n # assigning a tuple to the var 'key', which is the first\n # n words in the list of words. using % allows you to\n # loop around from the end back to the beginning. this won't\n # have an effect until you are at the end of the list\n key = tuple(words[i:n+i % len(words)])\n\n #if the key isn't in the dictionary:\n if key not in chains:\n #add an empty list as the value\n chains[key] = []\n # add the 3rd word to the value list. this also uses mod\n # to loop over the end of the list\n chains[key].append(words[(i + n) % len(words)])\n # adds None value to the end for a stopping point\n chains[key].append(None)\n # return the filled up dictoinary\n return chains", "def getExpectations():", "def test_markov_to_chain():\n chain = {\n 'PIKARD': [('Q', 2.0 / 3), ('DORF', 1.0 / 3)],\n 'Q': [('PIKARD', 2.0 / 3), ('ROKER', 1.0 / 3)],\n 'DORF': [('PIKARD', 1.0)],\n 'ROKER': [('Q', 1.0)],\n }\n with markov.DialogChainDatastore() as store:\n store.reinitialize()\n store.store_chain('speakers', chain)\n new_chain = store.to_chain('speakers')\n assert set(chain.keys()) == set(new_chain.keys())\n for speaker, probabilities in six.iteritems(chain):\n assert sort_probs(probabilities) == sort_probs(new_chain[speaker])", "def __iter__(self):\n return iter(self.chain_list)", "def test_comicscreators_get(self):\n pass", "def get_chain(self, name=None, data_keys=None, filters=None, x=None, y=None,\r\n views=None, post_process=True, orient_on=None, select=None):\r\n\r\n #Make sure all the given keys are in lists\r\n data_keys = self._force_key_as_list(data_keys)\r\n # filters = self._force_key_as_list(filters)\r\n views = self._force_key_as_list(views)\r\n\r\n if orient_on:\r\n if x is None:\r\n x = self.describe()['x'].drop_duplicates().values.tolist()\r\n if y is None: \r\n y = self.describe()['y'].drop_duplicates().values.tolist()\r\n if views is None:\r\n views = self._Stack__view_keys\r\n views = [v for v in views if '|default|' not in v]\r\n return self.__get_chains(name=name, data_keys=data_keys,\r\n filters=filters, x=x, y=y, views=views,\r\n post_process=post_process,\r\n orientation=orient_on, select=select)\r\n else:\r\n chain = Chain(name)\r\n found_views = []\r\n missed_views = []\r\n\r\n #Make sure all the given keys are in lists\r\n x = self._force_key_as_list(x)\r\n y = self._force_key_as_list(y)\r\n\r\n if data_keys is None:\r\n # Apply lazy 
data_keys if none given\r\n data_keys = self.keys()\r\n\r\n the_filter = \"no_filter\" if filters is None else filters\r\n\r\n if self.__has_list(data_keys):\r\n for key in data_keys:\r\n\r\n # Use describe method to get x keys if not supplied.\r\n if x is None:\r\n x_keys = self.describe()['x'].drop_duplicates().values.tolist()\r\n else:\r\n x_keys = x\r\n\r\n # Use describe method to get y keys if not supplied.\r\n if y is None:\r\n y_keys = self.describe()['y'].drop_duplicates().values.tolist()\r\n else:\r\n y_keys = y\r\n\r\n # Use describe method to get view keys if not supplied.\r\n if views is None:\r\n v_keys = self.describe()['view'].drop_duplicates().values.tolist()\r\n v_keys = [v_key for v_key in v_keys if '|default|'\r\n not in v_key]\r\n else:\r\n v_keys = views\r\n\r\n chain._validate_x_y_combination(x_keys, y_keys, orient_on)\r\n chain._derive_attributes(key,the_filter,x_keys,y_keys,views)\r\n\r\n # Apply lazy name if none given\r\n if name is None:\r\n chain._lazy_name()\r\n\r\n for x_key in x_keys:\r\n for y_key in y_keys:\r\n\r\n if views is None:\r\n chain[key][the_filter][x_key][y_key] = self[key][the_filter][x_key][y_key]\r\n else:\r\n for view in views:\r\n try:\r\n chain[key][the_filter][x_key][y_key][view] = self[key][the_filter][x_key][y_key][view]\r\n\r\n if view not in found_views:\r\n found_views.append(view)\r\n except KeyError:\r\n if view not in missed_views:\r\n missed_views.append(view)\r\n else:\r\n raise ValueError('One or more of your data_keys ({data_keys}) is not in the stack ({stack_keys})'.format(data_keys=data_keys, stack_keys=self.keys()))\r\n if found_views:\r\n chain.views = [view for view in chain.views\r\n if view in found_views]\r\n\r\n for view in missed_views:\r\n if view in found_views:\r\n missed_views.remove(view)\r\n\r\n if post_process:\r\n chain._post_process_shapes(self[chain.data_key].meta)\r\n\r\n if select is not None:\r\n for view in chain[key][the_filter][x_key][y_key]:\r\n df = chain[key][the_filter][x_key][y_key][view].dataframe\r\n levels = df.index.levels\r\n selection = {}\r\n for var in select:\r\n level = functions.find_variable_level(levels, var)\r\n if level is not None:\r\n selection[var] = level\r\n\r\n #Don't do anything if the selection doesnt produce a result\r\n if selection:\r\n # selection = {var: functions.find_variable_level(levels, var) for var in select}\r\n list_of_dfs = [df.xs(var, level=selection[var]) for var in selection.keys()]\r\n new_df = pd.concat(list_of_dfs)\r\n # Reconstruct the index\r\n new_df.index= pd.MultiIndex.from_product([levels[0],selection.keys()], names=df.index.names)\r\n chain[key][the_filter][x_key][y_key][view].dataframe = new_df\r\n\r\n return chain", "def chain(self):\n return self._chain", "def numChains(self):\n\n return self.getHierView().numChains()", "def getChain(self, chain):\n\n\t\tfor i in self.chain:\n\t\t\tif i.name == chain:\n\t\t\t\treturn i\n\n\t\treturn None", "def test_settle_tx_known_chain(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=self.ledger_id,\n info=self.info,\n tx_nonce=\"Transaction nonce\",\n )\n\n with mock.patch.object(\n self.decision_maker.ledger_apis, \"transfer\", 
return_value=\"tx_digest\"\n ):\n tx_digest = self.decision_maker._settle_tx(tx_message)\n assert tx_digest == \"tx_digest\"", "def make_chains(text_string):\n chains = {} \n\n words = text_string.split()\n \n for i in range(len(words) - 2):\n word_after_pair = words[i + 2]\n word_pair = (words[i], words[i + 1])\n\n if word_pair not in chains:\n chains[word_pair] = []\n #Need to make the value a list by putting brackets around it\n chains[word_pair].append(word_after_pair)\n\n return chains\n\n #print word_pair\n\n #tuple is in dict\n #tuple is not in dict, inlude it as a new addition to the list\n \n # input_text = {}\n # for text in \n \n # chains = make_chains(input_text)", "def chain(self):\n return self.sampler.chain", "def test_chain_selection(self, blockchain, genesis, block1, block2, block3, block4, block5, block6):\n blockchain.add(block4)\n # gids:\n # 0 <- 1 <- 3\n # 0 <- 2 <- 4\n\n assert blockchain._leaves == {hash(block3), hash(block4)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block3)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block4)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1, hash(block3): 2}\n assert blockchain._longest_chain == {hash(genesis), hash(block1), hash(block3)}\n\n blockchain.add(block5)\n # gids:\n # 0 <- 1 <- 3\n # 0 <- 2 <- 4 <- 5\n\n assert blockchain._leaves == {hash(block3), hash(block5)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block3)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block4)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block5)][Blockchain._CHAIN_LENGTH_KEY] == 4\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block2): 1, hash(block4): 2, hash(block5): 3}\n assert blockchain._longest_chain == {hash(genesis), hash(block2), hash(block4), hash(block5)}\n\n blockchain.add(block6)\n # gids:\n # 0 <- 1 <- 3 <- 6\n # 0 <- 2 <- 4 <- 5\n\n assert blockchain._leaves == {hash(block5), hash(block6)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block3)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block4)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._G.node[hash(block5)][Blockchain._CHAIN_LENGTH_KEY] == 4\n assert blockchain._G.node[hash(block6)][Blockchain._CHAIN_LENGTH_KEY] == 4\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block2): 1, hash(block4): 2, hash(block5): 3}\n assert blockchain._longest_chain == {hash(genesis), hash(block2), hash(block4), hash(block5)}", "def test_blind_sig_chain(self): # pylint: disable=too-many-locals\n\n test_levels = 4\n msg = os.urandom(1024)\n\n ca = ECCBlind()\n signer_obj = ca\n\n output = bytearray()\n\n for level in range(test_levels):\n if not level:\n output.extend(ca.pubkey())\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n point_r 
= signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertTrue(verifychain.verify(msg=msg, value=1))", "def getChainDef(self,chainDicts):\n\n # generation of ChainDef objects for each signature\n if self.doMuonChains:\n try:\n import TriggerMenu.muon.generateMuonChainDefs \n except Exception:\n log.error('Problems when importing MuonDef.py, disabling muon chains.')\n log.info(traceback.print_exc())\n self.doMuonChains = False\n \n if self.doBphysicsChains:\n try:\n import TriggerMenu.bphysics.generateBPhysicsChainDefs \n except Exception:\n log.error('Problems when importing BphysicsDef.py, disabling Bphysics chains.')\n log.info(traceback.print_exc())\n self.doBphysicsChains = False\n \n if self.doMETChains:\n try:\n import TriggerMenu.met.generateMETChainDefs \n except Exception:\n log.error('Problems when importing MissingETDef.py, disabling MET chains.')\n log.info(traceback.print_exc())\n self.doMETChains = False\n\n if self.doTauChains:\n try:\n import TriggerMenu.tau.generateTauChainDefs \n except Exception:\n log.error('Problems when importing TauDef.py, disabling tau chains.')\n log.info(traceback.print_exc())\n self.doTauChains = False\n\n if self.doEgammaChains:\n try:\n import TriggerMenu.egamma.generateElectronChainDefs \n import TriggerMenu.egamma.generatePhotonChainDefs \n except Exception:\n log.error('Problems when importing EgammaDef.py or PhotonDef.py, disabling egamma chains.')\n log.info(traceback.print_exc())\n self.doEgammaChains = False\n\n if self.doJetChains:\n try:\n import TriggerMenu.jet.generateJetChainDefs \n except Exception:\n log.error('Problems when importing JetDef.py or JetDef_HT.py, disabling jet chains.')\n log.info(traceback.print_exc())\n self.doJetChains = False\n\n if self.doBjetChains:\n try:\n import TriggerMenu.bjet.generateBjetChainDefs \n except Exception:\n log.error('Problems when importing BjetDef.py disabling bjet chains.')\n log.info(traceback.print_exc())\n self.doBjetChains = False\n\n if self.doMinBiasChains:\n try:\n import TriggerMenu.minbias.generateMinBiasChainDefs \n except Exception:\n log.error('Problems when importing MinBiasDef.py, disabling MinBias chains.')\n log.info(traceback.print_exc())\n self.doMinBiasChains = False\n\n if self.doHeavyIonChains:\n try:\n import TriggerMenu.heavyion.generateHeavyIonChainDefs\n except Exception:\n log.error('Problems when importing HeavyIonDef.py, disabling HeavyIon chains.')\n log.info(traceback.print_exc())\n self.doHeavyIonChains = False\n\n if self.doCosmicChains:\n try:\n import TriggerMenu.calibcosmicmon.generateCosmicChainDefs \n except Exception:\n log.error('Problems when importing CosmicDef.py, disabling cosmic chains.')\n log.info(traceback.print_exc())\n self.doCosmicChains = False\n\n if self.doCalibrationChains:\n try:\n import TriggerMenu.calibcosmicmon.generateCalibChainDefs \n except Exception:\n log.error('Problems when importing CalibDef.py, disabling calibration chains.')\n log.info(traceback.print_exc())\n self.doCalibrationChains = False\n \n if self.doStreamingChains:\n try:\n import 
TriggerMenu.calibcosmicmon.generateStreamingChainDefs \n except Exception:\n log.error('Problems when importing Streaming.py, disabling streaming chains.')\n log.info(traceback.print_exc())\n self.doStreamingChains = False\n \n\n if self.doMonitorChains:\n try:\n import TriggerMenu.calibcosmicmon.generateMonitoringChainDefs \n except Exception:\n log.error('Problems when importing Monitor.py, disabling monitoring chains.')\n log.info(traceback.print_exc())\n self.doMonitorChains = False\n\n if self.doBeamspotChains:\n try:\n import TriggerMenu.calibcosmicmon.generateBeamspotChainDefs \n except Exception:\n log.error('Problems when importing Beamspot.py, disabling beamspot chains.')\n log.info(traceback.print_exc())\n self.doBeamspotChains = False\n\n if self.doEnhancedBiasChains:\n try:\n import TriggerMenu.calibcosmicmon.generateEnhancedBiasChainDefs \n except Exception:\n log.error('Problems when importing EnhancedBias.py, disabling EnhancedBias chains.')\n log.info(traceback.print_exc())\n self.doEnhancedBiasChains = False\n\n if self.doTestChains:\n try:\n import TriggerMenu.test.generateTestChainDefs \n except Exception:\n log.error('Problems when importing Test.py, disabling Test chains.')\n log.info(traceback.print_exc())\n self.doTestChains = False\n\n \n if self.doCombinedChains:\n try:\n import TriggerMenu.combined.generateCombinedChainDefs \n except Exception:\n log.error('Problems when importing generateCombinedChainDefs.py, disabling Topo on combined chains.')\n log.info(traceback.print_exc())\n self.doCombinedChains = False\n\n\n\n listOfChainDefs = []\n\n log.debug(\"\\n chainDicts1 %s \", chainDicts)\n chainDicts = TriggerMenu.menu.MenuUtils.splitInterSignatureChainDict(chainDicts) \n log.debug(\"\\n chainDicts2 %s\", chainDicts)\n \n\n #print 'doEgammaChains, doMuonChains', self.doEgammaChains, self.doMuonChains\n\n for chainDict in chainDicts:\n chainDef = None\n #print 'checking chainDict for chain %s %s %r' %(chainDict['chainName'],chainDict[\"signature\"], self.doEnhancedBiasChains)\n\n if (chainDict[\"signature\"] == \"Jet\" or chainDict[\"signature\"] == \"HT\") and (self.doJetChains or self.doBjetChains):\n bjetchain = False\n for chainpart in chainDict[\"chainParts\"]:\n if chainpart['bTag']: bjetchain = True\n\n if (bjetchain is True) and self.doBjetChains:\n try:\n chainDef = TriggerMenu.bjet.generateBjetChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for bjet chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n elif self.doJetChains: \n try:\n chainDef = TriggerMenu.jet.generateJetChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n\n elif chainDict[\"signature\"] == \"Muon\" and self.doMuonChains:\n try:\n chainDef = TriggerMenu.muon.generateMuonChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"Bphysics\" and self.doBphysicsChains:\n try:\n chainDef = TriggerMenu.bphysics.generateBPhysicsChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"Electron\" and self.doEgammaChains:\n try:\n chainDef = 
TriggerMenu.egamma.generateElectronChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"Photon\" and self.doEgammaChains:\n try:\n chainDef = TriggerMenu.egamma.generatePhotonChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n \n elif (chainDict[\"signature\"] == \"MET\" or chainDict[\"signature\"] == \"XS\" or chainDict[\"signature\"] == \"TE\") and self.doMETChains:\n try:\n chainDef = TriggerMenu.met.generateMETChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"Tau\" and self.doTauChains:\n try:\n chainDef = TriggerMenu.tau.generateTauChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"MinBias\" and self.doMinBiasChains:\n try:\n chainDef = TriggerMenu.minbias.generateMinBiasChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"HeavyIon\" and self.doHeavyIonChains:\n try:\n chainDef = TriggerMenu.heavyion.generateHeavyIonChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"Cosmic\" and self.doCosmicChains:\n try:\n chainDef = TriggerMenu.calibcosmicmon.generateCosmicChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"Calibration\" and self.doCalibrationChains:\n try:\n chainDef = TriggerMenu.calibcosmicmon.generateCalibChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"Streaming\" and self.doStreamingChains:\n try:\n chainDef = TriggerMenu.calibcosmicmon.generateStreamingChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"Monitoring\" and self.doMonitorChains:\n try:\n chainDef = TriggerMenu.calibcosmicmon.generateMonitoringChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"Beamspot\" and self.doBeamspotChains:\n try:\n chainDef = TriggerMenu.calibcosmicmon.generateBeamspotChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n elif chainDict[\"signature\"] == \"EnhancedBias\" and 
self.doEnhancedBiasChains:\n try:\n chainDef = TriggerMenu.calibcosmicmon.generateEnhancedBiasChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n \n elif chainDict[\"signature\"] == \"Test\" and self.doTestChains:\n try:\n chainDef = TriggerMenu.test.generateTestChainDefs.generateChainDefs(chainDict)\n except Exception:\n log.error('Problems creating ChainDef for chain %s ' % (chainDict['chainName']))\n log.info(traceback.print_exc())\n continue\n\n\n else: \n log.error('Chain %s ignored - either because the trigger signature (\"slice\") has been turned off or because the corresponding chain dictionary cannot be read.' %(chainDict['chainName']))\n log.debug('Chain dictionary of failed chain is %s.', chainDict)\n \n log.debug(' ChainDef %s ' % chainDef)\n from .ChainDef import ErrorChainDef,ChainDef\n if isinstance(chainDef, ErrorChainDef): \n self.listOfErrorChainDefs.append(chainDict['chainName'])\n continue\n elif isinstance(chainDef, ChainDef):\n listOfChainDefs.append(chainDef)\n\n \n doTopo = self.CheckIntraSignatureTopo(chainDicts) and chainDict[\"topo\"]\n\n if len(listOfChainDefs) == 0 or not (len(listOfChainDefs)==len(chainDicts)):\n return False\n elif len(listOfChainDefs)>1:\n if (\"mergingStrategy\" in chainDicts[0].keys()):\n theChainDef = TriggerMenu.menu.MenuUtils.mergeChainDefs(listOfChainDefs,chainDicts[0][\"mergingStrategy\"],chainDicts[0][\"mergingOffset\"],preserveL2EFOrder = chainDicts[0][\"mergingPreserveL2EFOrder\"],doTopo=doTopo,chainDicts=chainDicts)#, noTEreplication = chainDicts[0][\"mergingNoTEreplication\"])\n else:\n log.error(\"No merging strategy specified for combined chain %s\" % chainDicts[0]['chainName'])\n \n\n else:\n theChainDef = listOfChainDefs[0]\n\n #Do TOPO on Combined chains\n if self.doCombinedChains:\n if doTopo:\n theChainDef = TriggerMenu.combined.generateCombinedChainDefs._addTopoInfo(theChainDef,chainDicts,listOfChainDefs)\n \n return theChainDef", "def metro_alg(N):\n\n chain = np.zeros(N) # start with x_0 = 0\n chain_removed = np.array([0])\n j = 0\n for i in range(N-1):\n\n y = (np.random.rand()-0.5)*10\n if next_chain_link(chain[i], y):\n chain[i + 1] = y\n else:\n chain[i + 1] = chain[i]\n\n if next_chain_link(chain_removed[j], y):\n chain_removed = np.append(chain_removed, y) # append creates new array, does not change array argument\n j += 1\n\n return chain, chain_removed", "def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None):\n # check if it's a Chain object\n if heavy_chains is None and light_chains is None and fab is None:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n # check if fab object is a list and if all object are abpytools.Fab objects\n if isinstance(fab, list) and all(isinstance(fab_i, Fab) for fab_i in fab):\n self._fab = fab\n self._light_chains = ChainCollection([x[0] for x in self._fab])\n self._heavy_chains = ChainCollection([x[1] for x in self._fab])\n\n if fab is None and (heavy_chains is not None and light_chains is not None):\n\n if isinstance(heavy_chains, list):\n self._heavy_chains = ChainCollection(antibody_objects=heavy_chains)\n\n elif isinstance(heavy_chains, ChainCollection):\n self._heavy_chains = heavy_chains\n\n else:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n if isinstance(light_chains, list):\n self._light_chains = 
ChainCollection(antibody_objects=light_chains)\n\n elif isinstance(light_chains, ChainCollection):\n self._light_chains = light_chains\n\n else:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n if len(self._light_chains.loading_status()) == 0:\n self._light_chains.load()\n\n if len(self._heavy_chains.loading_status()) == 0:\n self._heavy_chains.load()\n\n if self._light_chains.n_ab != self._heavy_chains.n_ab:\n raise ValueError('Number of heavy chains must be the same of light chains')\n\n if isinstance(names, list) and all(isinstance(name, str) for name in names):\n if len(names) == self._heavy_chains.n_ab:\n self._names = names\n else:\n raise ValueError(\n 'Length of name list must be the same as length of heavy_chains/light chains lists')\n\n elif names is None:\n self._names = ['{} - {}'.format(heavy, light) for heavy, light in zip(self._heavy_chains.names,\n self._light_chains.names)]\n\n else:\n raise ValueError(\"Names expected a list of strings, instead got {}\".format(type(names)))\n\n self._n_ab = self._light_chains.n_ab\n self._pair_sequences = [heavy + light for light, heavy in zip(self._heavy_chains.sequences,\n self._light_chains.sequences)]\n\n # keep the name of the heavy and light chains internally to keep everything in the right order\n self._internal_heavy_name = self._heavy_chains.names\n self._internal_light_name = self._light_chains.names", "def chains(self, release, grouping):\n data = []\n for group in grouping:\n for chain in group['members']:\n data.append({\n 'ife_id': chain['id'],\n 'nr_class_id': group['name']['class_id'],\n 'nr_release_id': release,\n 'rank': chain['rank'],\n 'rep': chain['id'] == group['representative']['id'],\n })\n return data", "def test_sort_chain_multiple_content_decreasing():\n n = 17\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n prev = None\n seen = [False]*n\n for i in range(n):\n assert walker.data in data, \"sort_chain created extraneous data {} given chain with values decreasing\".format(walker.data)\n seen[walker.data] = True\n if prev is not None:\n assert prev.data <= walker.data, \"sort_chain placed {} before {} given chain with values decreasing\".format(prev.data, walker.data)\n prev = walker\n walker = walker.next\n\n for i,b in enumerate(seen):\n assert b, \"sort_chain omitted data value {} from returned chain given chain with values decreasing\".format(i)", "def build_karels():\n build_karel1()\n build_karel2()\n build_karel3()\n build_karel4()" ]
[ "0.6914287", "0.67781395", "0.6644035", "0.6538033", "0.652312", "0.6489876", "0.64195883", "0.63450104", "0.62671566", "0.61932814", "0.60854757", "0.6066701", "0.605091", "0.6029899", "0.6004666", "0.60018355", "0.5970353", "0.59533113", "0.5910838", "0.5885802", "0.58681154", "0.5850139", "0.5832499", "0.5813991", "0.5785004", "0.5756634", "0.5742945", "0.57267576", "0.571478", "0.5712835", "0.56906325", "0.56822395", "0.5680454", "0.5680454", "0.5677697", "0.5661097", "0.5659774", "0.56481636", "0.56470865", "0.56124955", "0.5583753", "0.5583017", "0.5582019", "0.5576012", "0.5552615", "0.5551325", "0.55297786", "0.55240726", "0.5521062", "0.5520168", "0.55103016", "0.5502252", "0.5489771", "0.5474347", "0.5471953", "0.5456601", "0.54532874", "0.545203", "0.5447591", "0.54437846", "0.5438691", "0.54363966", "0.54334", "0.54147273", "0.5408306", "0.5408044", "0.53933126", "0.5370783", "0.53614414", "0.53542477", "0.53501195", "0.53393537", "0.532991", "0.5321903", "0.5319432", "0.5317015", "0.53142726", "0.5306964", "0.5305488", "0.53043336", "0.52970964", "0.5271119", "0.5267757", "0.5264811", "0.52615553", "0.525096", "0.52469736", "0.5246009", "0.5227031", "0.5225457", "0.52239007", "0.52237767", "0.52200496", "0.5218899", "0.5202636", "0.51944506", "0.5193215", "0.5189937", "0.5187752", "0.51843226" ]
0.94273794
0
Test case for post_chain
def test_post_chain(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_chain_search(self):\n pass", "def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self.step_actor(self.ipt)\n cb.assert_called_once_with(None)", "def test_post_foods(self):\n pass", "def test_post_process(self):\n self.executed = False\n\n post_procs = pyamf.POST_DECODE_PROCESSORS[:]\n\n def restore_post_procs():\n pyamf.POST_DECODE_PROCESSORS = post_procs\n\n self.addCleanup(restore_post_procs)\n pyamf.POST_DECODE_PROCESSORS = []\n\n def postprocess(payload, context):\n self.assertEqual(payload, u'foo')\n self.assertEqual(context, {})\n\n self.executed = True\n\n return payload\n\n pyamf.add_post_decode_processor(postprocess)\n\n # setup complete\n bytes = pyamf.encode(u'foo', encoding=pyamf.AMF3).getvalue()\n\n self.decoder.send(bytes)\n ret = next(self.decoder)\n\n self.assertTrue(self.executed)\n self.assertEqual(ret, u'foo')", "def test_get_chains(self):\n pass", "def test_rewrite_chains_stub(self):\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n )\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"]})", "def test_posthardwares(self):\n pass", "def test_tail_call(self):", "def test_tranform_chain() -> None:\n transform_chain = TransformChain(\n input_variables=[\"first_name\", \"last_name\"],\n output_variables=[\"greeting\"],\n transform=dummy_transform,\n )\n input_dict = {\"first_name\": \"Leroy\", \"last_name\": \"Jenkins\"}\n response = transform_chain(input_dict)\n expected_response = {\"greeting\": \"Leroy Jenkins says hello\"}\n assert response == expected_response", "def validate_chain():", "def _test_chain(self, x, class_type_list, kwargs_list, y=None):\n chain, modules = self._create_chain(class_type_list, kwargs_list)\n\n chain = chain.fit(x, y=y)\n self.logger.info(\"Preprocessors chain:\\n{:}\".format(chain))\n\n x_chain = chain.forward(x)\n self.logger.info(\"Trasformed X (chain):\\n{:}\".format(x_chain))\n\n # Train the manual chain and transform\n x_manual = x\n for module in modules:\n module.fit(x_manual, y=y)\n x_manual = module.forward(x_manual)\n\n self.logger.info(\"Trasformed X (manual):\\n{:}\".format(x_manual))\n self.assert_allclose(x_chain, x_manual)\n\n return x_chain", "def test_post_order_traversal(our_bsts):\n bpost = []\n for i in our_bsts[0].post_order():\n bpost.append(i)\n assert bpost == our_bsts[6]", "def _post(self, *args, **kwargs):\n return _TestA_swig.my_qpsk_demod_cb_sptr__post(self, *args, **kwargs)", "def _post(self, *args, **kwargs):\n return _TestA_swig.cleanslate_sptr__post(self, *args, **kwargs)", "def after_test(self, func, *args, **kwargs):\n pass", "def chain_cmd(ctx):\n pass", "def forward_test(self, *args, **kwargs):\n pass", "def test_check_duplication_entry_at_restoring_two_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n ref_entity_2 = Entity.objects.create(name=\"ReferredEntity2\", created_user=self._user)\n ref_entries_2 = [\n Entry.objects.create(name=\"ref2-%d\" % i, created_user=self._user, schema=ref_entity_2)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], 
\"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n attr_info_2 = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries_2[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries_2[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n for attr_name, info in attr_info_2.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=ref_entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity_2)\n\n ref_entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in attr_info.items():\n attr = entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n ref_entries[0].complement_attrs(self._user)\n for attr_name, info in attr_info_2.items():\n attr = ref_entries[0].attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n # sync referral entries from database\n [x.refresh_from_db() for x in ref_entries]\n [x.refresh_from_db() for x in ref_entries_2]\n\n self.assertFalse(ref_entries_2[1].is_active)\n\n # create same name entry\n Entry.objects.create(name=\"ref2-1\", created_user=self._user, schema=ref_entity_2)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)", "def test_post(self):\n pass", "def test_postorder_traversal(depth_one_tree):\n testlist = []\n depth_one_tree.post_order(lambda x: testlist.append(x))\n assert str(testlist) == str([1, 2, 3, 4, 0])", "def test_post_no_operation(capsys):\n a = K()\n with pytest.raises(TypeError):\n assert a.post_order()", "def test_workflows_post(self):\n pass", "def _postprocess(self):", "def test_process_postpay_accepted(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.first_name,\r\n 'orderNumber': str(order1.id),\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n 'ccAuthReply_amount': '0.00'\r\n }\r\n result = process_postpay_callback(params)\r\n self.assertTrue(result['success'])\r\n self.assertEqual(result['order'], order1)\r\n order1 = Order.objects.get(id=order1.id) # reload from DB to capture side-effect of process_postpay_callback\r\n self.assertEqual(order1.status, 'purchased')\r\n self.assertFalse(result['error_html'])", "def test_issue_post_issue_reaction(self):\n pass", "def test_user_actions_post(self):\n pass", "def post_processor(self):", "def _post_hooks(self):", "def test_fall_through(self):\n dec = self.actions(self.mock_model, [\"doit\"], fall_through=True)\n req = self.req(\"post\", \"/the/url\", data={\"other\": \"thing\"})\n\n res = self.view(req, decorator=dec)\n\n 
self.assertEqual(self.mock_model._base_manager.get.call_count, 0)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.request.method, \"POST\")\n self.assertEqual(res.request.POST[\"other\"], \"thing\")", "def testPost(self):\n self.handler.handle = self.mox.CreateMockAnything()\n self.handler.handle('POST', '/my_service', 'method1')\n self.handler.handle('POST', '/my_other_service', 'method2')\n\n self.mox.ReplayAll()\n\n self.handler.post('/my_service', 'method1')\n self.handler.post('/my_other_service', 'method2')\n\n self.mox.VerifyAll()", "def inner_test():\n pass", "def inner_test():\n pass", "def test_post_query_reply_offers(self):\n pass", "def test_process_postpay_exception(self):\r\n baseline = {\r\n 'orderNumber': '1',\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n }\r\n # tests for missing key\r\n for key in baseline:\r\n params = baseline.copy()\r\n del params[key]\r\n result = process_postpay_callback(params)\r\n self.assertFalse(result['success'])\r\n self.assertIsNone(result['order'])\r\n self.assertIn('error_msg', result['error_html'])", "def post_postprocessor(result=None, **kw):\n logger.info(\"start post_postprocessor\")\n logger.info(result)\n logger.info(\"end post_postprocessor\")\n pass", "def test_post_user_post(self):\n pass", "def test_delete_required_chain_stub(self):\n # Exit the graceful restart period, during which we do not stub out\n # chains.\n self.ipt.cleanup(async=True)\n # Install a couple of chains. foo depends on bar.\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"],\n \"bar\": [\"--append bar --jump ACCEPT\"]},\n {\"foo\": set([\"bar\"]),\n \"bar\": set()},\n async=True,\n )\n self.step_actor(self.ipt)\n # Both chains should be programmed as normal.\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [\"--append bar --jump ACCEPT\"] })\n\n # Deleting bar should stub it out instead.\n self.ipt.delete_chains([\"bar\"], async=True)\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"] })", "def test_add_item_using_post(self):\n pass", "def post_process(self, relevant_targets):\r\n pass", "def trip_chain(self):\n pass", "async def intermediate(self, ctx):\n await ctx.send(f'Testing intermediate')", "def create_callback(self, chain):", "def test_post_users_post(self):\n pass", "def test_check_duplication_entry_at_restoring_one_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in attr_info.items():\n attr = 
entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n\n # create same name entry\n Entry.objects.create(name=\"ref-1\", created_user=self._user, schema=ref_entity)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def test_post_graph(self, graph_entry_class):\n graph_entry_class.return_value.state = \"no-op\"\n graph_entry_class.return_value.path = \"foo/app1\"\n graph_entry_class.return_value.execute.return_value = (0, ['Success'], True)\n\n graph = ApplyGraph('plan', self.graph, self.post_graph, \"bar\")\n\n graph.execute_graph()\n graph.execute_post_graph()\n\n self.assertEqual(\n graph_entry_class.return_value.execute.mock_calls,\n []\n )\n self.assertTrue(len(graph.not_applied) == 2)", "def test_back_and_forth(\n self, items, transform, inverse, post_process_i,\n post_process_transformed, equality=lambda lhs, rhs: lhs == rhs):\n for i in items:\n # We assume these are all tuples after post processing so we can treat\n # them all the same.\n i = post_process_i(i)\n transformed = post_process_transformed(transform(*i))\n inverted = post_process_i(inverse(*transformed))\n self.assertTrue(equality(inverted, i))", "def test_apply_endorsements(self):", "def test_datatransformationsetups_post(self):\n pass", "def test_after_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('inc', 'abcd', 30)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 40)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 50)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are after each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'inject')", "def test_groups_state_post(self):\n pass", "def test_rollback():", "def test_smoker_post(self):\n pass", "def test_state_after_failure(self):\n pass", "def test_post_foods_list(self):\n pass", "def post_backward_generator(self):\n pass", "def test_peers_post(self):\n pass", "def 
test_settle_tx_known_chain(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=self.ledger_id,\n info=self.info,\n tx_nonce=\"Transaction nonce\",\n )\n\n with mock.patch.object(\n self.decision_maker.ledger_apis, \"transfer\", return_value=\"tx_digest\"\n ):\n tx_digest = self.decision_maker._settle_tx(tx_message)\n assert tx_digest == \"tx_digest\"", "def test_post_order_list(self):\n _expected_list = [13, 5, 103, 57, 23]\n\n _output_list = []\n\n # Call post_order_list to test\n post_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _post_order_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _post_order_output", "def post():\n raise NotImplementedError", "def _send_stack_mock(self, last_tb, stack, *_args, **_kwargs):\n self.fail('%s, %s' % (last_tb, stack))", "def test_rewrite_existing_chain_remove_stub_dependency(self):\n self.txn.store_rewrite_chain(\"felix-a\", [\"foo\"], set([\"felix-b\"]))\n self.assertEqual(self.txn.affected_chains,\n set([\"felix-a\", \"felix-stub\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([]))\n self.assertEqual(self.txn.chains_to_delete, set([\"felix-stub\"]))\n self.assertEqual(self.txn.referenced_chains, set([\"felix-b\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [\"foo\"],\n \"felix-b\": [],\n \"felix-c\": []\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"])})", "def test_generate_receipt_instructions_callchain(self):\r\n cart = Order.get_cart_for_user(self.user)\r\n item = OrderItem(user=self.user, order=cart)\r\n item.save()\r\n self.assertTrue(cart.has_items())\r\n with patch.object(OrderItem, 'generate_receipt_instructions', self.mock_gen_inst):\r\n cart.generate_receipt_instructions()\r\n self.mock_gen_inst.assert_called_with()", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def test_post_process_forwarder(\n dispatch_post_process_group_task, kafka_message_without_transaction_header\n):\n forwarder = PostProcessForwarderWorker(concurrency=1)\n future = forwarder.process_message(kafka_message_without_transaction_header)\n\n forwarder.flush_batch([future])\n\n dispatch_post_process_group_task.assert_called_once_with(\n event_id=\"fe0ee9a2bc3b415497bad68aaf70dc7f\",\n project_id=1,\n group_id=43,\n primary_hash=\"311ee66a5b8e697929804ceb1c456ffe\",\n is_new=False,\n is_regression=None,\n is_new_group_environment=False,\n queue=\"post_process_errors\",\n group_states=[\n {\"id\": 43, \"is_new\": False, \"is_regression\": None, \"is_new_group_environment\": False}\n ],\n )\n\n forwarder.shutdown()", "def test_wallets_post(self):\n pass", "def test_before_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('set', 'abcd', 0)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 10)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 
20)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are before each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'abcd')", "def test_bst_empty_post_order(bst_empty):\n check_list = []\n bst_empty.post_order_trav(lambda x: check_list.append(x.val))\n assert check_list == []", "def test_post_order_0_4(bst_wiki):\n assert tuple(bst_wiki.post_order()) == (1, 3, 2, 5, 6, 4, 8, 9, 7)", "def test_process_postpay_not_accepted(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.first_name,\r\n 'orderNumber': str(order1.id),\r\n 'orderCurrency': 'usd',\r\n 'decision': 'REJECT',\r\n 'ccAuthReply_amount': '0.00',\r\n 'reasonCode': '207'\r\n }\r\n result = process_postpay_callback(params)\r\n self.assertFalse(result['success'])\r\n self.assertEqual(result['order'], order1)\r\n self.assertEqual(order1.status, 'cart')\r\n self.assertIn(REASONCODE_MAP['207'], result['error_html'])", "def test_nested_dispatchers():\n\n dispatcher = ntelebot.dispatch.Dispatcher()\n dispatcher.add_command('command', lambda ctx: 'COMMAND')\n\n subdispatcher = ntelebot.dispatch.Dispatcher()\n dispatcher.add(subdispatcher)\n subdispatcher.add_command('subcommand', lambda ctx: 'SUBCOMMAND')\n\n subsubdispatcher = ntelebot.dispatch.Dispatcher()\n subdispatcher.add(subsubdispatcher)\n subsubdispatcher.add_command('subsubcommand', lambda ctx: 'SUBSUBCOMMAND')\n\n dispatcher.add_command('last', lambda ctx: 'LAST')\n\n ctx = MockContext()\n ctx.type = 'message'\n assert dispatcher(ctx) is False\n ctx.command = 'command'\n assert dispatcher(ctx) == 'COMMAND'\n ctx.command = 'subcommand'\n assert dispatcher(ctx) == 'SUBCOMMAND'\n ctx.command = 'subsubcommand'\n assert dispatcher(ctx) == 'SUBSUBCOMMAND'\n ctx.command = 'last'\n assert dispatcher(ctx) == 'LAST'", "def post_execute(self):", "def _proceed(self):\n raise NotImplementedError", "def test_required_deleted_chain_gets_stubbed(self):\n self.txn.store_delete(\"felix-b\")\n self.assertEqual(self.txn.affected_chains, 
set([\"felix-b\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([\"felix-b\"]))\n self.assertEqual(self.txn.chains_to_delete, set())\n self.assertEqual(self.txn.referenced_chains,\n set([\"felix-b\", \"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [],\n \"felix-c\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\", \"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"]),\n \"felix-stub\": set([\"felix-a\"])})", "def test_post_order_0_3(bst_right_balance):\n assert tuple(bst_right_balance.post_order()) == (2, 5, 7, 9, 8, 6)", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def test_rewrite_existing_chain_remove_normal_dependency(self):\n self.txn.store_rewrite_chain(\"felix-a\", [\"foo\"], set([\"felix-stub\"]))\n self.assertEqual(self.txn.affected_chains, set([\"felix-a\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([]))\n self.assertEqual(self.txn.chains_to_delete, set([]))\n self.assertEqual(self.txn.referenced_chains, set([\"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [\"foo\"],\n \"felix-b\": [],\n \"felix-c\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-stub\": set([\"felix-a\"])})", "def chain(self, chain):\n\n self._chain = chain", "def test_blog_rollback():", "def postcondition(self, result, exc_info, *args, **kwargs):\n pass", "def test_pre_post_hooks(self):\n os.makedirs('/tmp/localhost/pacha_pre')\n os.makedirs('/tmp/localhost/pacha_post')\n pre_script = open('/tmp/localhost/pacha_pre/foo.sh', 'w')\n pre_script.write('''touch /tmp/localhost/pre_got_executed.txt''')\n pre_script.close()\n post_script = open('/tmp/localhost/pacha_post/bar.sh', 'w')\n post_script.write('''touch /tmp/localhost/post_got_executed.txt''')\n post_script.close()\n run = rebuild.Rebuild(hostname='localhost') \n run.pre_hooks()\n run.post_hooks()\n self.assertTrue(os.path.isfile('/tmp/localhost/post_got_executed.txt'))\n self.assertTrue(os.path.isfile('/tmp/localhost/pre_got_executed.txt'))", "def post(self):\n raise NotImplementedError()", "def test_run_ended(self):", "def post_traverse(self, f, args=()):\n try:\n pairs = self._post_traverse\n except AttributeError:\n raise RuntimeError('post_traverse() may only be called '\n 'during publishing traversal.')\n else:\n pairs.append((f, tuple(args)))", "def chain_cleanup(chain):\n snapshot = chain.take_snapshot()\n yield\n chain.revert_to_snapshot(snapshot)", "def test_post_with_ignore_errors_calls_all_registered_funcs(self):\n def _raise(_):\n raise Exception\n mock_event = Mock()\n mock_subscriptions = [Mock(), Mock(), Mock()]\n mock_subscriptions[0].deliver.side_effect = _raise\n bus = event_bus._event_bus\n for i, subscription in enumerate(mock_subscriptions):\n subscription.order = i\n bus._subscriptions[type(mock_event)] = mock_subscriptions\n\n event_bus.post(mock_event, ignore_errors=True)\n\n for subscription in mock_subscriptions:\n subscription.deliver.assert_called_once_with(mock_event)", "def test_coupledmodels_post(self):\n pass", "def test_chain(mocker):\n transaction = Transaction(\n chain=-1,\n nonce=4_294_967_295,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n 
unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_CHAIN\n ):\n transaction.validate(raise_exception=True)\n\n transaction.chain = 15\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.chain = 257\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_CHAIN\n ):\n transaction.validate(raise_exception=True)", "def do_step(self) -> None:", "def step_forward(self):", "def test_appended(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper._queue.pop()\n self.assertEqual(expected, actual)", "def test_data_object_post(self):\n pass", "def chain():\n return eth_tester.EthereumTester(eth_tester.PyEVMBackend())" ]
[ "0.72168523", "0.65788776", "0.6171648", "0.6129438", "0.61099774", "0.61021096", "0.60098636", "0.5998518", "0.59712416", "0.5869386", "0.58414704", "0.5824033", "0.5820647", "0.5788468", "0.5751005", "0.5730567", "0.57289803", "0.57287365", "0.5715039", "0.5704063", "0.5691138", "0.56803733", "0.5641229", "0.5613563", "0.5599987", "0.55970144", "0.5593933", "0.55780613", "0.55520594", "0.5549403", "0.5545335", "0.5545335", "0.5541611", "0.55307955", "0.5526904", "0.5453928", "0.54448074", "0.543328", "0.5433229", "0.5427677", "0.54193777", "0.54154557", "0.5391051", "0.5384284", "0.53660613", "0.5337445", "0.532676", "0.5326278", "0.5321156", "0.53010845", "0.52936167", "0.5293004", "0.52843547", "0.52666533", "0.52625614", "0.5259509", "0.5258967", "0.52564466", "0.5249334", "0.52451926", "0.52270377", "0.5219731", "0.52192724", "0.5215283", "0.5215283", "0.5215283", "0.5215283", "0.5215283", "0.521374", "0.52060944", "0.52053857", "0.5203131", "0.5201187", "0.5199311", "0.5196465", "0.51962787", "0.519451", "0.51933485", "0.5176988", "0.516879", "0.516879", "0.516879", "0.516879", "0.51657814", "0.5165681", "0.5165533", "0.51563746", "0.51549506", "0.51438797", "0.5143669", "0.51404655", "0.51389366", "0.5135744", "0.51345754", "0.513413", "0.5131792", "0.5131277", "0.51187706", "0.510949", "0.5099932" ]
0.9139945
0
Test case for post_chain_search
def test_post_chain_search(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_chain(self):\n pass", "def test_post_foods_search(self):\n pass", "def test_search(self):\n pass", "def test_search(self):\n pass", "def test_search(self):\n pass", "def test_search_systems_post(self):\n pass", "def test_search_organizations_post(self):\n pass", "def test_search(self):\n d = self._search()\n self._response([2, 5, 10])\n self.assertEqual(self.successResultOf(d), [2, 5, 10])", "def test_get_chains(self):\n pass", "def test_analyze_a_recipe_search_query(self):\n pass", "def test_search_recipes(self):\n pass", "def test_autocomplete_recipe_search(self):\n pass", "def processSearchResult(self):", "def test_process_searchbox_with_mock(self):\n\n result = self.client.get('/process_searchbox', data={'zipcode': '94043', 'cuisine': 'indian'})\n self.assertIn(b\"Dosa Paratha\", result.data)", "def test_act_is_searching(self):\n # setup\n self.strategy._is_searching = True\n\n # operation\n self.search_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(1)\n has_attributes, error_str = self.message_has_attributes(\n actual_message=self.get_message_from_outbox(),\n message_type=OefSearchMessage,\n performative=OefSearchMessage.Performative.SEARCH_SERVICES,\n to=self.skill.skill_context.search_service_address,\n sender=str(self.skill.public_id),\n query=self.skill.skill_context.strategy.get_location_and_service_query(),\n )\n assert has_attributes, error_str", "def test_search_systemusers_post(self):\n pass", "def search(self, *args, **kwargs):", "def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self.step_actor(self.ipt)\n cb.assert_called_once_with(None)", "def test_act_not_is_searching(self):\n # setup\n self.strategy._is_searching = False\n\n # operation\n self.search_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "def test_determine_search_method(): # ***Incomplete test\n ##########################\n # Arrange.\n query_exten = \"query_exten\"\n db_exten = \"db_exten\"\n\n ##########################\n # Act.\n #x = determine_search_method(query_exten,\n #\t\tdb_exten)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "def test_index_page_post(self):\n tester = app.test_client(self)\n response = tester.post('/',data = dict(search_address = \"paris\"), follow_redirects=True)\n print(\"post response code : \",response.status_code)\n self.assertEqual(response.status_code,200)\n assert b\"Moscow Ring Road Distance Finder\" not in response.data", "def test_match_table_post(self):\n pass", "def test_search_test_search_returns_correct_menu(self):\n # create some db records\n dataset = self.create_mixed_test_data()\n test_search_string = 'bravo'\n\n with patch('builtins.input', side_effect=test_search_string):\n result = self.menu.search_text_search()\n\n expected_result = self.menu.present_next_result\n\n self.assertEqual(expected_result, result)", "def test_search(self):\n from rubber import resource\n requestmock = RequestMock()\n resource.requests = requestmock\n\n q = {'query': {'term': {'user': 'kimchy'}}}\n self.Article.elasticsearch.search(q, toto='titi')\n\n self.assertEquals(1, len(requestmock.stack))\n self.assertEquals('http://example.com:9200/tests/article/_search', requestmock.stack[0]['url'])\n self.assertEquals('GET', requestmock.stack[0]['method'])\n self.assertEquals('titi', requestmock.stack[0]['kwargs']['toto'])\n from rubber.instanceutils import data_to_json\n 
self.assertEquals(data_to_json(q), requestmock.stack[0]['kwargs']['data'])\n\n self.Article.elasticsearch.mapping.put({'some': 'mapping'}, toto='titi')\n\n self.assertEquals(2, len(requestmock.stack))\n self.assertEquals('http://example.com:9200/tests/article/_mapping', requestmock.stack[1]['url'])\n self.assertEquals('PUT', requestmock.stack[1]['method'])\n self.assertEquals('titi', requestmock.stack[1]['kwargs']['toto'])", "def test_candidates_retrieve(self):\n pass", "def test_enable_case_search_reindex(self, fake_factor):\n enable_case_search(self.domain)\n self.assertEqual(fake_factor.call_args, call(domain=self.domain))\n self.assertTrue(fake_factor().build.called)\n self.assertTrue(fake_factor().build().reindex.called)", "def test_search(self):\n with unittest.mock.patch('builtins.input', return_value='a'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='b'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='c'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='d'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='e'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='q'):\n good = self.ec.search()\n self.assertFalse(good)", "def test_search_multiresults(self):\n self.assertEquals(len(self.t['Scrubs'].search('my first')) >= 3, True)", "def test_get_foods_search(self):\n pass", "def test_absorbs_naked_a_search(self):\n invenio_search = \"author:ellis\"\n naked_search = \"a ellis\"\n self._compare_searches(invenio_search, naked_search)", "def test_search_publications(self):\n pass", "def test_get_results(self):\n pass", "def test_other_search(url):\n test_clear(url)\n admin_tk = channel_user_create_0(url)[0]\n guest1_tk, guest1_id = channel_user_create_1(url)\n\n test_channels = {\n 'token': admin_tk,\n 'name': 'channel_1',\n 'is_public': True\n }\n requests.post(url + \"channels/create\", json=test_channels)\n \n test_channels = {\n 'token': admin_tk,\n 'name': 'channel_2',\n 'is_public': True\n }\n requests.post(url + \"channels/create\", json=test_channels)\n\n requests.post(url + \"channel/invite\", json={\n 'token': admin_tk,\n 'channel_id': 1,\n 'u_id': guest1_id,\n })\n\n requests.post(url + \"channel/invite\", json={\n 'token': admin_tk,\n 'channel_id': 2,\n 'u_id': guest1_id,\n })\n\n test_admin_message_1 = {\n 'token': admin_tk,\n 'channel_id': 1,\n 'message': 'Hello'\n }\n resp = requests.post(url + \"message/send\", json=test_admin_message_1)\n message_send_resp = resp.json()\n assert message_send_resp['message_id'] == 1\n\n test_guest_message_1 = {\n 'token': guest1_tk,\n 'channel_id': 1,\n 'message': 'A Hello'\n }\n resp = requests.post(url + \"message/send\", json=test_guest_message_1)\n message_send_resp = resp.json()\n assert message_send_resp['message_id'] == 2\n\n test_message_2 = {\n 'token': admin_tk,\n 'channel_id': 2,\n 'message': 'AA Hello'\n }\n resp = requests.post(url + \"message/send\", json=test_message_2)\n message_send_resp = resp.json()\n assert message_send_resp['message_id'] == 3\n\n test_search = {\n 'token': admin_tk,\n 'query_str': 'A'\n }\n resp = requests.get(url + \"search\", params=test_search)\n search_resp = resp.json()\n assert len(search_resp['messages']) == 2\n search_result = [messages['message'] for messages in search_resp['messages']]\n assert 
search_result == ['A Hello', 'AA Hello']\n\n test_search = {\n 'token': admin_tk,\n 'query_str': 'a'\n }\n resp = requests.get(url + \"search\", params=test_search)\n search_resp = resp.json()\n assert len(search_resp['messages']) == 0\n search_result = [messages['message'] for messages in search_resp['messages']]\n assert search_result == []", "def test_search_3(self):\n\n # search for \"cheese\"\n FrontSearchForm() \\\n .populate_form({'search_box' : 'cheese'}) \\\n .submit_form()\n\n # check that results are shown\n AppBar() \\\n .result_stats.should(be.visible)", "def test_post_foods(self):\n pass", "def test_search_result_count(self):\n user = User.objects.create(username=\"hoge\")\n\n ref_entity = Entity.objects.create(name=\"ref_entity\", created_user=user)\n ref_entry = Entry.objects.create(name=\"ref\", schema=ref_entity, created_user=user)\n\n entity = Entity.objects.create(name=\"entity\", created_user=user)\n for name in [\"foo\", \"bar\"]:\n attr = EntityAttr.objects.create(\n name=name,\n type=AttrTypeValue[\"object\"],\n created_user=user,\n parent_entity=entity,\n )\n attr.referral.add(ref_entity)\n entity.attrs.add(attr)\n\n for i in range(0, 20):\n entry = Entry.objects.create(name=\"e%3d\" % i, schema=entity, created_user=user)\n entry.complement_attrs(user)\n\n if i < 10:\n entry.attrs.get(schema__name=\"foo\").add_value(user, ref_entry)\n else:\n entry.attrs.get(schema__name=\"bar\").add_value(user, ref_entry)\n\n entry.register_es()\n\n resp = Entry.search_entries(user, [entity.id], [{\"name\": \"foo\", \"keyword\": \"ref\"}], limit=5)\n self.assertEqual(resp[\"ret_count\"], 10)\n self.assertEqual(len(resp[\"ret_values\"]), 5)", "def test_submit_for_endorsement(self):", "def test_service_item_search(self):\n\n flag = \"user\"\n api = \"service.item.editsn\"\n #current_page = 1\n search_info = json.dumps({\n })\n sn_pre = '730300010036664'#'730300010034444'\n sn_after = '730300010036665'#'730300010033333'\n result = self.access_api(flag = flag, api = api, sn_pre=sn_pre, sn_after=sn_after)\n #self.assertTrue('data_list' in result)\n #print(result[\"data_list\"])", "def test_book_search_pagination(self):\n c = Client()\n # TODO implement a test\n pass", "def test_get_occurrence(self):\n pass", "def test_search(self):\n tester = app.test_client(self)\n response = tester.post('/search')\n self.assertEqual(response.status_code,200)", "def test_index(self):", "def continue_search( self ):\n return True;", "def test_search_many(self):\n self.engine.insert_data(self.correct_campers_data)\n results = self.engine.search(self.search_many)\n with open(f\"{self.datadir}/results_many.json\") as f:\n self.assertDictEqual(\n json.loads(results),\n json.loads(f.read())\n )", "def test_02_visit_again(self):", "def test_get_chain_by_id(self):\n pass", "def test_search_page_item_in_database(self, mock_search):\n mock_search.return_value = {\"result\": \"result\", \"products\": \"products\"}\n response = self.client.post(\"/search/\", {\"query\": \"test\"})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/results_page.html\")\n self.assertTemplateUsed(response, \"search/result.html\")\n self.assertEqual(response.context[\"result\"], \"result\")\n self.assertEqual(response.context[\"products\"], \"products\")", "def test_search(self):\n from importCsv.models import City, Hotel\n path = reverse(\"search\")\n user = 
mixer.blend(User, is_staff=True, is_superuser=True)\n city = mixer.blend(City, abbrev=\"tes\", name=\"test\")\n mixer.blend(Hotel, city=city, data=\"testData\", name=\"test hotel\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"tes\": \"on\"})\n assert r.status_code == 200\n assert r.content.find(b'test hotel')", "def test_success_search_result(self):\n search_org_api = '/api/org/?search=test'\n auth_client = self.create_auth_client()\n filter_result = ListOrgSerializer(Org.objects.filter(name__startswith='test'), many=True) \n response = auth_client.get(search_org_api)\n self.assertEqual(response.data, filter_result.data)", "def test_post_query_reply_offers(self):\n pass", "def test_search(self):\n self.elasticsearch_cls().search.return_value = {\n 'hits': {\n 'total': 10,\n 'hits': [1, 2, 3, 4],\n },\n '_scroll_id': 'abcd'\n }\n self.elasticsearch_cls().scroll.side_effect = [\n {'hits': {'hits': [5, 6, 7]}},\n {'hits': {'hits': [8, 9, 10]}},\n {'hits': {'hits': []}},\n ]\n\n query = 'this is the query'\n hits = list(self.client.search(query))\n\n self.assertListEqual(\n hits,\n [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]])", "def test_apply_endorsements(self):", "def test_get_School_search(self):\n school_ids = self.create_School(2,200)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n data = {'first_name': 'Robert', 'last_name': 'Puntitpong','age': 25, 'nationality': 'Cambodia', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n data = {'first_name': 'Alan', 'last_name': 'Manny','age': 20, 'nationality': 'Cambodia', 'school': school_ids[1]}\n response = self.client.post(url, data, format='json')\n\n \"\"\"by school\"\"\"\n response = self.client.get(url + '?search=triam', format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 3)\n\n \"\"\"by school\"\"\"\n response = self.client.get(url + '?search=udom', format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 3)\n\n \"\"\"Nationality\"\"\"\n response = self.client.get(url + '?search=odia', format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)\n\n \"\"\"last_name\"\"\"\n response = self.client.get(url + '?search=titpong', format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)\n\n \"\"\"first_name and nationality\"\"\"\n response = self.client.get(url + '?search=lan', format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def test_01_visit(self):", "def test_search(self):\n\n with self.client as c:\n response = c.get(\"/users?q=al\")\n data = str(response.data)\n\n self.assertIn(\"@alice\", data)\n self.assertIn(\"@alvin\", data)\n\n self.assertNotIn(\"@bob\", data)\n self.assertNotIn(\"@carl\", data)", "def test_framework_selections_post(self):\n pass", "def search():\n pass", "def searchingFinished(self, *args, **kwargs): # real signature unknown\n pass", "def test_load_search(self, mock_get):\n\n c = Client()\n data = {\n 'search_type': self.filter_title,\n 'search_value': self.title\n }\n response = c.post('/taric_books/search/', 
data=data)\n\n self.assertEqual(response.status_code, 200)", "def __call__(self, item):\n if 'search' not in item:\n return\n if item.get('complete'):\n return\n item.apply('complete', True)\n i_ = item.parent.index(item)\n for j, data in self._search(item['search']):\n item.parent.insert(i_ + 2 + j, sdict(data.items()))", "def test_search_results(self):\n\t\tself.driver.get(\"http://www.google.com\")\n\t\tsearchbox = self.driver.find_element_by_name('q')\n\t\tsearchbox.send_keys(self.searchTerm)\n\t\tsearchbox.send_keys(Keys.RETURN)\n\t\traw_input('\\nenter to continue:')\n\t\t# Get a list of all search results\n\t\tsearchResultsList = self.driver.find_elements_by_tag_name('a')\n\t\tprint \"\\nsearch results list data type = \", type(searchResultsList)\n\t\tprint \"--> len of search results list = \", len(searchResultsList)\n\t\tprint \"--> search term = '%s'\" % self.searchTerm\n\n\t\t## Find first 10 results appearing after 1st instance of search term\n\t\t#indexTermAppears = self.getIndexOfFirstAppearance(searchResultsList)\n\t\t#print \"index Term Appears data type = \", type(indexTermAppears)\n\n\n\n\n\t\t#firstTen = searchResultsList[indexTermAppears:indexTermAppears+10]\n\t\t\n\t\t# Look for the search term in each href of all 10 search results\n\t\tfirstTen = searchResultsList\n\t\tif len(firstTen) == 0:\n\t\t\tself.fail(\"Test failed b/c no links were found\")\n\t\tfailCount = 0\n\t\tpassCount = 0\n\t\tlisthrefs = []\n\n\t\tfor link in firstTen:\n\t\t\tlinkName = link.get_attribute('href')\n\t\t\tprint \"linkName type: \", type(linkName)\n\t\t\t# type check does NOT require quotes for the answer\n\t\t\tif type(linkName) == unicode: ##########################\n\t\t\t\tlisthrefs.append(linkName)\n\t\t\t\tprint \"linkName: \", linkName, \" -- linkName type = \", type(linkName)\n\t\t\t\tif self.searchTerm not in linkName:\n\t\t\t\t\tfailCount += 1\n\t\t\t\t\tprint \"search term: '%s' does NOT appear in %s \" % (self.searchTerm, linkName)\n\t\t\t\telse:\n\t\t\t\t\tpassCount += 1\n\t\t\t\t\tprint \"\\nsearch term: '%s' DOES appear in %s \" % (self.searchTerm, linkName)\n\t\t\t\t\n\t\tprint \"\\npassCount = %d --------- failCount = %d\" % (passCount, failCount)\n\t\t\n\t\t# creat a list of the top 20\n\t\tsearchTermList = []\n\t\tmaxLinks = 20\n\t\tcurrentLinks = 0\n\t\tisTriggered = False\n\t\tnumLinksMissingSearchTerm = 0\n\t\tnumLinksContainingSearchTerm = 0\n\n\t\tfor href in listhrefs:\n\t\t\tif not isTriggered:\n\t\t\t\tif 'automation' in href:\n\t\t\t\t\tisTriggered = True\n\t\t\t\t\tcurrentLinks += 1 \n\t\t\t\t\tsearchTermList.append(href)\n\t\t\telif currentLinks < maxLinks:\n\t\t\t\tsearchTermList.append(href)\n\t\t\t\tcurrentLinks += 1\n\n\t\tprint \"search term list contains the 20 links after first instance of search term\"\n\t\tprint searchTermList\n\n\t\tfor link in searchTermList:\n\t\t\tif 'automation' in link:\n\t\t\t\tnumLinksContainingSearchTerm += 1\n\t\t\telse:\n\t\t\t\tnumLinksMissingSearchTerm += 1\n\t\t\t\tprint \"search term: \", self.searchTerm, \"does NOT appear in \\n - link: \", link\n\n\t\traw_input('enter to continue')\n\n\t\t# Is Fail Count sitll zero?\n\n\t\tself.assertEqual(numLinksMissingSearchTerm, 0)", "def test_load_search_page(self, mock_get):\n\n c = Client()\n data = {\n 'search_type': self.filter_author,\n 'search_value': self.author,\n 'page_value': '2'\n }\n response = c.post('/taric_books/search_page/'\n + self.author + '/' + self.filter_author + '/', data=data)\n\n self.assertEqual(response.status_code, 200)", "def 
test_search_recipes_by_nutrients(self):\n pass", "def test_search_question(self): \n data = {'searchTerm':'title'}\n res = self.client().post('/search', \n data=json.dumps(data),\n content_type='application/json')\n self.data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n json_res = json.loads(res.get_data(as_text=True))", "def test_nonvouched_search(self):\n url = reverse('search')\n response = self.mozillian_client.get(url)\n eq_(response.status_code, 200)\n eq_(len(response.context['people']), 2)\n\n response = self.mozillian_client.get(\n url, {'q': 'Am', 'include_non_vouched': 1})\n eq_(response.status_code, 200)\n eq_(len(response.context['people']), 3)", "def test_search_4(self):\n\n # search for \"cheese\"\n form = FrontSearchForm()\n form.search_box.set_value('cheese')\n form.submit.click()\n\n # check that results are shown\n AppBar() \\\n .result_stats.should(be.visible)", "def test_search_contract(self):\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n # asserts that there aren't any contracts in changelist\n self.assertNotIn('table', content)\n self.assertIn(\n '<a href=\"/admin/contracts/contract/add/\" class=\"addlink\">',\n content)\n\n # creates two contracts\n payload = self.contract_one_data\n payload['tenant'] = payload['tenant'].id\n payload['property'] = payload['property'].id\n response = self.client.post(\n '/admin/contracts/contract/add/', payload, follow=True)\n self.assertEqual(response.status_code, 200)\n payload = self.contract_two_data\n payload['tenant'] = payload['tenant'].id\n payload['property'] = payload['property'].id\n response = self.client.post(\n '/admin/contracts/contract/add/', payload, follow=True)\n self.assertEqual(response.status_code, 200)\n\n # checks both of them show up in listing\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertIn('table', content)\n self.assertIn('Sept. 25, 2017', content)\n self.assertIn('Sept. 25, 2018', content)\n self.assertIn(str(self.contract_one_data['rent']), content)\n self.assertIn('Oct. 22, 2017', content)\n self.assertIn('Sept. 
22, 2018', content)\n self.assertIn(str(self.contract_two_data['rent']), content)\n\n # searches for contract\n contract = Contract.objects.get(\n property=self.contract_one_data['property'])\n\n contract_two = Contract.objects.get(\n property=self.contract_two_data['property'])\n response = self.client.get(\n '/admin/contracts/contract/?q={}'.format(\n contract.property.city))\n content = response.content\n self.assertIn('table', content)\n self.assertIn(contract.tenant.get_full_name(), content)\n self.assertIn(contract.property.__unicode__(), content)\n self.assertNotIn(contract_two.tenant.get_full_name(), content)\n self.assertNotIn(contract_two.property.__unicode__(), content)", "def test_search_data(self):\n tester = app.test_client(self)\n response = tester.post('/search')\n data = response.get_json()\n self.assertTrue('data' in data)", "def test_perform_search_old(self):\n es_instance_info = {'cluster_name': 'elasticsearch', 'cluster_uuid': 'kPjOcrpMQaWWm4neFdzLrw', 'name': 'f492663fbfa2', 'tagline': 'You Know, for Search', 'version': {'build_date': '2019-04-05T22:55:32.697037Z', 'build_flavor': 'oss', 'build_hash': 'b7e28a7', 'build_snapshot': False, 'build_type': 'tar', 'lucene_version': '8.0.0', 'minimum_index_compa...y_version': '6.0.0-beta1', 'minimum_wire_compat...y_version': '6.7.0', 'number': '6.0.0'}}\n es_query = None\n es_index = None\n es_doc_type = 4\n es_mock = mock.Mock()\n FunctionComponent.perform_search(es_instance_info, es_mock, es_query, es_index, es_doc_type)\n es_mock.search.assert_called_with(doc_type=4, body=None, ignore=[400, 404, 500], index=None)", "def search(self, term):", "def test_postorgs(self):\n pass", "def search(self, query):", "def test_create_saved_app_map_search(self):\n pass", "def test_search_page_item_not_in_database(self, mock_api, mock_search):\n mock_search.side_effect = [LookupError, {\"result\": \"result\",\n \"products\": \"products\"}]\n response = self.client.post(\"/search/\", {\"query\": \"test\"})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/results_page.html\")\n self.assertTemplateUsed(response, \"search/result.html\")\n self.assertEqual(response.context[\"result\"], \"result\")\n self.assertEqual(response.context[\"products\"], \"products\")", "def question_new_search():", "def test_filter_search_form_is_valid(self):\r\n response = self.client.get(reverse('search_results'), {\r\n 'name': 'nutella',\r\n 'category': '1',\r\n 'nutriscore': 'd'\r\n })\r\n self.assertTrue(response.context['product_list'])", "def test_post_foods_list(self):\n pass", "def test_searchOn(self):\n self.assertFalse(\n self.server.search_ON(self.earlierQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_ON(self.sameDateQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_ON(self.laterQuery, self.seq, self.msg))", "def test_search_employee_returns_the_correct_menu(self):\n # add some employees to the database\n test_employees = [\n {'id': 1, 'name': \"Test Employee 1\"},\n {'id': 2, 'name': \"Test Employee 2\"}\n ]\n for employee in test_employees:\n e = db_manager.Employee.get_or_create(name=employee['name'])\n # give each employee an associated logentry\n db_manager.LogEntry.create(\n employee=e[0],\n date=datetime.date(2018, 1, 2),\n task_name='Test task {}'.format(employee['id']),\n duration=employee['id'],\n notes='Note'\n )\n user_input = '1'\n with 
patch('builtins.input', side_effect=user_input):\n result = self.menu.search_employee()\n\n expected_result = self.menu.present_next_result\n\n self.assertEqual(expected_result, result)", "def assert_filter_builds_to(self, expect, filter, _chain_filters=None):\n final_query = {'bool': {'must_not': [RESEARCH.to_dict()]}}\n\n if expect:\n final_query['bool']['must'] = expect\n main, nested = filter.build(_chain_filters)\n assert final_query == main.to_dict()\n\n return main, nested", "def test_all_tweets2(self):\n\n response1 = {'meta': {'result_count': 500, 'next_token': 1}}\n response2 = {'meta': {'result_count': 500, 'next_token': 2}}\n response3 = {'meta': {'result_count': 500, 'next_token': 3}}\n response4 = {'meta': {'result_count': 500}}\n\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n thing = SearchTweets(self.db, f)\n with patch.object(thing, '_SearchTweets__twitter_all_tweets', new_callable=PropertyMock(return_value=True)):\n with patch.object(thing, '_SearchTweets__connect_to_endpoint') as mock_method:\n with patch.object(thing, '_SearchTweets__multi_user', new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(thing, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with self.assertLogs('SEARCH', level='DEBUG') as cm:\n with patch.object(thing, '_SearchTweets__save'):\n mock_method.side_effect = [response1, response2, response3, response4]\n thing.search()\n\n self.assertEqual(mock_method.call_count, 4)\n self.assertTrue(\"DEBUG:SEARCH:ASKING FOR NEXT PAGE\" in cm.output)\n self.assertTrue(\"DEBUG:SEARCH:THERE ARE NO OTHER PAGE AVAILABLE. 
ALL TWEETS REACHED\" in cm.output)\n self.assertTrue(\"DEBUG:SEARCH:NO NEXT TOKEN IN RESPONSE:INTERRUPTING\" in cm.output)", "def test_workflows_post(self):\n pass", "def _processArgsToLogic_search(args, stdout, stderr) :\n\n if args.forceDownload :\n args.download = True\n # Initiliaze action flags\n args.actionFlags = dict()\n # --query and --listId\n if (args.query is not None) and (args.listId is not None) :\n stdout.write(\"--query and --listId options cannot be specified \"\n \"simultaneously\\n\"\n \"Use --help for details on usage.\")\n sys.exit()\n # --query and no --listId\n elif (args.query is not None) and (args.listId is None) :\n _checkRetmax(args.retmax, stderr)\n _checkEmailOption(args, stderr)\n args.actionFlags[\"DoGenbankSearch\"] = True\n # no --query and --listId\n elif (args.query is None) and (args.listId is not None) :\n _checkEmailOption(args, stderr)\n args.actionFlags[\"DoGetList\"] = True\n # no --query and no --listId\n else :\n assert (args.query is None) and (args.listId is None)\n stderr.write(\"Please specify either --listId or --query\\n\"\n \"Use --help for details on usage.\\n\")\n sys.exit()\n return args", "def test_query_cached(self):\n CreateMatch()\n\n data = {\n \"term1\": \"TESTURL1\",\n \"term2\": \"TESTURL2\"\n }\n response = self.app.post(\n \"/degree\", data=data, follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n\n self.assertIn(\"33 degrees\", res_txt)\n self.assertIn(\"YAAAAY!\", res_txt)", "def test_update_saved_app_map_search(self):\n pass", "def test_no_next_token_2calls(self):\n\n response1 = {'meta': {'result_count': 500, 'next_token': 1}}\n response2 = {'meta': {'result_count': 10}}\n\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n thing = SearchTweets(self.db, f)\n with patch.object(thing, '_SearchTweets__twitter_n_results', new_callable=PropertyMock(return_value=520)):\n with patch.object(thing, '_SearchTweets__connect_to_endpoint') as mock_method:\n with patch.object(thing, '_SearchTweets__multi_user', new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(thing, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with self.assertLogs('SEARCH', level='DEBUG') as cm:\n with patch.object(thing, '_SearchTweets__save'):\n mock_method.side_effect = [response1, response2]\n thing.search()\n\n self.assertEqual(mock_method.call_count, 2)\n self.assertTrue(\"DEBUG:SEARCH:THERE ARE NO OTHER PAGE AVAILABLE. 
ALL TWEETS REACHED\" in cm.output)\n self.assertTrue(\"DEBUG:SEARCH:NO NEXT TOKEN IN RESPONSE:INTERRUPTING\" in cm.output)", "def testSearchAlgFinishes(self):\n\n class FinishFastAlg(SuggestionAlgorithm):\n _index = 0\n\n def next_trials(self):\n trials = []\n self._index += 1\n\n for trial in self._trial_generator:\n trials += [trial]\n break\n\n if self._index > 4:\n self._finished = True\n return trials\n\n def _suggest(self, trial_id):\n return {}\n\n ray.init(num_cpus=2)\n experiment_spec = {\n \"run\": \"__fake\",\n \"num_samples\": 2,\n \"stop\": {\n \"training_iteration\": 1\n }\n }\n searcher = FinishFastAlg()\n experiments = [Experiment.from_json(\"test\", experiment_spec)]\n searcher.add_configurations(experiments)\n\n runner = TrialRunner(search_alg=searcher)\n self.assertFalse(runner.is_finished())\n runner.step() # This launches a new run\n runner.step() # This launches a 2nd run\n self.assertFalse(searcher.is_finished())\n self.assertFalse(runner.is_finished())\n runner.step() # This kills the first run\n self.assertFalse(searcher.is_finished())\n self.assertFalse(runner.is_finished())\n runner.step() # This kills the 2nd run\n self.assertFalse(searcher.is_finished())\n self.assertFalse(runner.is_finished())\n runner.step() # this converts self._finished to True\n self.assertTrue(searcher.is_finished())\n self.assertRaises(TuneError, runner.step)", "def test02_blog_search_box(self):\n self.info(\"Use Search box in home bage.\")\n self.find_element(\"blogs_home_search\").click()\n search_box = self.find_element(\"blogs_search_box\")\n search_box.send_keys(\"test\")\n search_icon = self.find_element(\"blogs_icon_search\")\n search_icon.click()\n\n self.info(\"Check search box works successfully. \")\n search_results = self.find_element(\"blogs_search_result\")\n results = search_results.find_elements_by_tag_name(\"li\")\n results_text = self.find_element(\"blogs_result_text\")\n self.assertIn(str(len(results)), results_text)\n\n self.info(\"Try to get one of results, should works successfully.\")\n if results:\n random_post = random.choice(results)\n tmp = random_post.text\n post_name = tmp[tmp.find(\":\") + 2 :]\n random_post.find_element_by_tag_name(\"a\").click()\n self.assertIn(post_name, self.driver.current_url)", "def test_product_search(self):\n\n flag = \"user\"\n api = \"product.product.search\"\n current_page = 1\n search_info = json.dumps({\n })\n\n result = self.access_api(flag = flag, api = api, current_page = current_page, search_info = search_info)\n self.assertTrue('data_list' in result)", "def _messageSetSearchTest(self, queryTerms, expectedMessages):\n def search():\n return self.client.search(queryTerms)\n\n d = self.connected.addCallback(strip(search))\n def searched(results):\n self.assertEqual(results, expectedMessages)\n d.addCallback(searched)\n d.addCallback(self._cbStopClient)\n d.addErrback(self._ebGeneral)\n self.loopback()\n return d", "def post_search(self, qs):\n return qs", "def test_search_page(self):\n result = self.client.get(\"/search\")\n self.assertIn(b\"Search\", result.data)", "def test_set_activity_occurrence_results(self):\n pass", "def test_perform_search_new(self):\n es_instance_info = {'cluster_name': 'elasticsearch', 'cluster_uuid': 'kPjOcrpMQaWWm4neFdzLrw', 'name': 'f492663fbfa2', 'tagline': 'You Know, for Search', 'version': {'build_date': '2019-04-05T22:55:32.697037Z', 'build_flavor': 'oss', 'build_hash': 'b7e28a7', 'build_snapshot': False, 'build_type': 'tar', 'lucene_version': '8.0.0', 'minimum_index_compa...y_version': 
'6.0.0-beta1', 'minimum_wire_compat...y_version': '6.7.0', 'number': '7.0.0'}}\n es_query = None\n es_index = None\n es_doc_type = 4\n\n es_mock = mock.Mock()\n FunctionComponent.perform_search(es_instance_info, es_mock, es_query, es_index, es_doc_type)\n es_mock.search.assert_called_with(body=None, ignore=[400, 404, 500], index=None)", "def test_paginate_no_inputs():\n result = search_paginate()\n assert result == (0, 50)", "def test_run_search__found_and_available(self):\n basics = {\n 'API_URL_ROOT': self.api_url_root,\n 'API_KEY': self.api_key,\n 'PARTNERSHIP_ID': self.partnership_id,\n 'UNIVERSITY_CODE': self.university_code,\n 'LOG_PATH': self.LOG_PATH }\n bd = BorrowDirect( basics )\n bd.run_search( self.patron_barcode, 'ISBN', self.isbn_found_and_available )\n # print bd.search_result\n self.assertEqual( ['Available', 'PickupLocation', 'RequestLink', 'SearchTerm'], sorted(bd.search_result.keys()) )\n self.assertEqual( True, bd.search_result['Available'] )", "def test_validate_search():\n payload = {\"search_input\": \"The input\", \"search_object_types\": []}\n result = validate_search_payload(payload)\n assert result == {}", "def test_search_samples(self):\n self.login()\n\n page_size = 20\n query = 'batch8'\n\n # hit the API endpoint\n data = {'q': query,\n 'page': 1,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.filter(batch__icontains=query).order_by(\"-received\")\n\n # format queryset into json for returning\n serializer = SampleSerializer(expected, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': False\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_search_recipes_by_ingredients(self):\n pass" ]
[ "0.7415727", "0.6913196", "0.66014063", "0.66014063", "0.66014063", "0.65811795", "0.6300726", "0.615566", "0.5992487", "0.5984323", "0.59211963", "0.58675545", "0.58613753", "0.58335793", "0.58033735", "0.5775473", "0.57752", "0.5766603", "0.57302696", "0.57134694", "0.5695927", "0.5666814", "0.5652642", "0.5644926", "0.5625471", "0.56222975", "0.562001", "0.5603048", "0.5600581", "0.55873066", "0.5581915", "0.5567697", "0.5567315", "0.553092", "0.55254585", "0.5522358", "0.55076116", "0.54904526", "0.54876125", "0.5469132", "0.54286635", "0.54221344", "0.5418249", "0.5416213", "0.5415531", "0.54129726", "0.54050153", "0.53970754", "0.5389401", "0.5375912", "0.53640634", "0.5351135", "0.5351135", "0.53495413", "0.5328346", "0.5316686", "0.53163266", "0.5314773", "0.53132", "0.5312445", "0.5304591", "0.52989036", "0.52978545", "0.5297038", "0.5294822", "0.52942353", "0.5287494", "0.52691513", "0.5258729", "0.5254916", "0.5252178", "0.52467954", "0.52428555", "0.5231796", "0.523109", "0.52244824", "0.5221026", "0.52202165", "0.52199256", "0.52173847", "0.52138966", "0.52095747", "0.52035457", "0.5203043", "0.5194166", "0.5193858", "0.51915604", "0.51886183", "0.5182817", "0.51778233", "0.51709545", "0.5168451", "0.5165941", "0.5163524", "0.51616997", "0.5161212", "0.5158584", "0.51509017", "0.5149298", "0.5145858" ]
0.9217483
0
Build a networkx graph object from variables and relations.
def as_networkx_graph(variables, relations): graph = nx.Graph() # One node for each variables graph.add_nodes_from([v.name for v in variables]) for r in relations: for p in all_pairs([e.name for e in r.dimensions]): graph.add_edge(*p) return graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_networkx_bipartite_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables], bipartite=0)\n graph.add_nodes_from([r.name for r in relations], bipartite=1)\n\n for r in relations:\n for e in r.dimensions:\n graph.add_edge(r.name, e.name)\n return graph", "def initialize_graph(compound_relations, relation_types):\n graph = nx.DiGraph()\n for compound, targets in compound_relations.items():\n for target, relation in targets.items():\n if relation in relation_types:\n graph.add_edge(compound, target)\n return graph", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def build_graph(edges):\n \n G = nx.MultiGraph()\n G.add_edges_from(edges)\n return G", "def parse_graph(self):\n\t\tnx_graph = nx.Graph()\n\t\tfor node in self.vertices:\n\t\t\tnx_graph.add_node(node)\n\n\t\tfor edge in self.edges:\n\t\t\tnode1, node2, weight = edge\n\t\t\tnx_graph.add_edge(node1, node2, weight=weight)\n\n\t\treturn nx_graph", "def build_graph(self):\n pass", "def to_networkx(self):\n g = nx.Graph()\n for v in self.vs.values():\n g.add_node(v)\n for v in self.fs:\n g.add_node(v)\n for u in v.neighbors:\n g.add_edge(v, u)\n return g", "def _build_graph(self):\n pass", "def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def build_graph(graph_dict): \n #make networkX graph\n G = nx.Graph()\n G.add_nodes_from(graph_dict.keys())\n for key in graph_dict:\n for i in range(len(graph_dict[key])):\n G.add_edge(key,graph_dict[key][i])\n return G", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id 
in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g", "def _create_nx_graph(self):\n #_graph = nx.Graph()\n graph = nx.DiGraph()\n for name, lemma in self._lemmas_info.get_parent_lemmas():\n added_children = []\n for child_n in lemma.evidence_lemmas:\n child_node = str(child_n)\n if not self._should_be_filtered( added_children, child_node ):\n added_children.append( child_node )\n \n graph.add_node( name ) # it's OK if it exists from the previous iteration\n graph.add_node( child_node )\n # lemma1 because lemma2, means that lemma2 -> lemma1\n graph.add_edge( child_node, name )\n \n self._append_source_and_target( graph )\n return graph", "def build_graph(self):\n raise NotImplementedError", "def _get_graph(nodes: Nodes, edges: np.ndarray):\n\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n\n return graph", "def get_graph(self) -> nx.classes.graph.Graph:\n G = nx.Graph()\n # add nodes\n G.add_nodes_from([(room, props) for room, props in self.get_rooms_with_properties().items()])\n # add edges\n G.add_edges_from(self.get_edges_with_properties())\n return G", "def _build_graph(show=False):\n global G\n G = nx.Graph()\n node_labels, edge_labels = {}, {}\n for idx, dep in enumerate(A.deps):\n\n types = [\"dependent\", \"governor\"]\n\n # nodes, labels\n for x in types:\n G.add_node(str(dep[x]), word=dep[x + \"Gloss\"], pos=A.lookup[dep[x]][\"pos\"])\n node_labels[str(dep[x])] = dep[x + \"Gloss\"] + \" : \" + A.lookup[dep[x]][\"pos\"]\n\n # edges, labels\n G.add_edge(str(dep[types[0]]), str(dep[types[1]]), dep=dep[\"dep\"])\n edge_labels[(str(dep[types[0]]), str(dep[types[1]]))] = dep[\"dep\"]\n\n if show == True:\n pos = nx.spring_layout(G)\n nx.draw_networkx(G, pos=pos, labels=node_labels, node_color=\"white\", alpha=.5)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()", "def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])", "def build_graph(self):\n self.graph = tf.Graph()\n temp_connections = self.connections\n with self.graph.as_default():\n operations = {}\n\n # create Variables for input vertices\n for neuron_id in self.input_neurons:\n self.inputs[neuron_id] = tf.get_variable(name=str(neuron_id), shape=(),\n initializer=tf.zeros_initializer)\n deletion = []\n while len(temp_connections) > 0:\n for neuron_id in deletion:\n temp_connections.pop(neuron_id, None)\n deletion = []\n keys = list(temp_connections)\n random.shuffle(keys)\n # create input & output vertices\n for neuron_id in temp_connections:\n input_neuron_ids = temp_connections[neuron_id]\n if self.check(input_neuron_ids, operations):\n # weights\n v_weights = tf.constant(self.weights[neuron_id])\n # input vertices\n v_inputs = []\n\n for input_neuron_id in input_neuron_ids:\n if self.is_input_neuron(input_neuron_id):\n vertex = self.inputs[input_neuron_id]\n else:\n vertex = operations[input_neuron_id]\n\n v_inputs.append(vertex)\n\n deletion.append(neuron_id)\n\n # multiply weights and inputs\n mul = tf.multiply(v_inputs, v_weights, str(neuron_id))\n # sum multiplied 
values\n sum = tf.reduce_sum(mul, name='sum_' + str(neuron_id))\n # apply activation function\n if self.is_output_neuron(neuron_id):\n activation = tf.sigmoid(sum, name=\"output\")\n else:\n activation = tf.nn.leaky_relu(sum, alpha=0.2, name=\"relu_\" + str(neuron_id))\n\n operations[neuron_id] = activation\n if self.is_output_neuron(neuron_id):\n self.output = activation\n return self.graph, self.inputs, self.output", "def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n# G_new.add_node(str(nd), x=str(attrs['attributes'][0]),\n# y=str(attrs['attributes'][1]))\n for nd1, nd2, attrs in G.edges(data=True):\n G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n# G_new.add_edge(str(nd1), str(nd2))\n\n return G_new", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def _make_graph(nodes, ways):\n graph = networkx.MultiDiGraph(crs=\"EPSG:4326\")\n ways_proj = ways.set_crs(\"EPSG:4326\").to_crs(\"EPSG:3395\")\n\n for node_id, node_attr in nodes.rename(columns={'longitude': 'x', 'latitude': 'y'}).iterrows():\n graph.add_node(node_id, **node_attr)\n\n for _, way in ways_proj.iterrows():\n\n osm_oneway_values = [\"yes\", \"true\", \"1\", \"-1\", \"T\", \"F\"]\n if \"oneway\" in way and way.oneway in osm_oneway_values:\n if way[\"oneway\"] == \"-1\" or way[\"oneway\"] == \"T\":\n # paths with a one-way value of -1 or T are one-way, but in the\n # reverse direction of the nodes' order, see osm documentation\n path_nodes = list(reversed(way.nodes))\n else:\n path_nodes = way.nodes\n # add this path (in only one direction) to the graph\n one_way = True\n\n elif \"junction\" in way and way.junction == \"roundabout\":\n # roundabout are also oneway but not tagged as is\n path_nodes = way.nodes\n one_way = True\n\n # else, this path is not tagged as one-way or it is a walking network\n # (you can walk both directions on a one-way street)\n else:\n # add this path (in both directions) to the graph and set its\n # 'oneway' attribute to False. 
if this is a walking network, this\n # may very well be a one-way street (as cars/bikes go), but in a\n # walking-only network it is a bi-directional edge\n path_nodes = way.nodes\n one_way = False\n\n # zip together the path nodes so you get tuples like (0,1), (1,2), (2,3)\n # and so on\n path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))\n graph.add_edges_from(path_edges, **way[['id']])\n if not one_way:\n path_edges_reverse = [(v, u) for u, v in path_edges]\n graph.add_edges_from(path_edges_reverse, **way[['id']])\n\n graph = osmnx.utils_graph.add_edge_lengths(graph)\n return graph", "def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G", "def _construct_graph(self):\n raise NotImplementedError", "def build_graph(friends: list, mutuals: dict) -> nx.classes.graph.Graph:\n friends_ids = [friend['id'] for friend in friends]\n G = nx.Graph()\n G.add_nodes_from(range(len(friends_ids)))\n\n for idx in tqdm(friends_ids):\n node_id = friends_ids.index(idx)\n G.nodes[node_id]['vk_id'] = idx\n G.nodes[node_id]['first_name'] = friends[node_id]['first_name']\n G.nodes[node_id]['last_name'] = friends[node_id]['last_name']\n G.nodes[node_id]['gender'] = friends[node_id]['sex']\n G.nodes[node_id]['relation'] = friends[node_id].get('relation')\n G.nodes[node_id]['city'] = friends[node_id].get('city', {}).get('title')\n G.nodes[node_id]['country'] = friends[node_id].get('country', {}).get('title')\n G.nodes[node_id]['schools'] = friends[node_id].get('schools')\n G.nodes[node_id]['universities'] = friends[node_id].get('universities')\n G.nodes[node_id]['career'] = friends[node_id].get('career')\n idx_mutuals = mutuals.get(idx)\n if idx_mutuals != None:\n edges = [(node_id, friends_ids.index(friend_id)) for friend_id in idx_mutuals]\n G.add_edges_from(edges)\n\n return G", "def _build_dependency_graph(self):\n\n #\n # Find the binary roles\n #\n nodes, roles = self._find_roles()\n\n #\n # Build the graph\n #\n working_list = list(set(nodes.keys()))\n\n setters = [b for b, r in roles.items() if Role.SETTER in r or Role.SETTER_GETTER in r]\n\n while working_list:\n b = working_list[0]\n working_list = working_list[1:]\n\n if nodes[b] 
not in self._graph:\n self._graph[nodes[b]] = []\n\n # it's a root node\n if Role.GETTER not in roles[b] and Role.SETTER_GETTER not in roles[b]:\n nodes[b].set_root()\n\n # takes params from some other binary\n else:\n is_orphan = True\n for setter in setters:\n setter_strings_set = set(nodes[setter].role_strings)\n node_strings_set = set(nodes[b].role_strings)\n if setter_strings_set.intersection(node_strings_set):\n if nodes[setter] not in self._graph:\n self._graph[nodes[setter]] = []\n self._graph[nodes[setter]].append(nodes[b])\n is_orphan = False\n\n # mark orphans\n if is_orphan:\n nodes[b].set_orphan()\n\n # Clean up\n for k, childs in self._graph.iteritems():\n self._graph[k] = list(set(childs))\n\n # set leaves\n for k, c in self._graph.iteritems():\n if not c:\n k.set_leaf()\n\n # post processing:\n # remove those nodes that are not orphans\n # and are not network parsers\n\n nodes = self.nodes\n children = [c for x in self._graph.values() for c in x if x]\n leafs_non_orphan = [n for n in nodes if n.leaf and not n.orphan]\n seed_names = [x.split('/')[-1] for x in self._seed_bins]\n spurious_nodes = [n for n in leafs_non_orphan if n not in children and n.bin.split('/')[-1] not in seed_names]\n for to_rem in spurious_nodes:\n del self._graph[to_rem]", "def build_graph(self, nodes, edges):\n\n log.info(\"Building Graph with [%s] nodes and [%s] edges\" % ('NOT_IMPLEMENTED', 'NOT_IMPLEMENTED'))\n\n for node, node_type in nodes.items():\n self.graph.node(node, node.replace('tmp_git_repo/', '', 1), color=NODE_COLORS[node_type])\n\n for left_edge, right_edges in edges.items():\n for right_edge in right_edges:\n self.graph.edge(left_edge, right_edge.import_path, label=right_edge.import_name)", "def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)", "def _create_graph(netlist):\n G = nx.Graph()\n for t in netlist:\n G.add_edges_from([(t.name, t.drain), (t.name, t.gate), (t.name, t.source)])\n return G", "def buildGraph(self):\r\n\r\n print 'Building graph...'\r\n\r\n self.buildQ()\r\n self.buildP()\r\n self.buildReconstructionTerm()\r\n self.buildConditionalPriorTerm()\r\n self.buildWPriorTerm()\r\n self.buildZPriorTerm()\r\n\r\n self.buildObjective()\r\n self.buildGrad()", "def build_graph(nodes):\n\n job_instances_map = {}\n\n # first create node structure\n nodes_map = {}\n root_nodes = []\n for node in nodes:\n new_node = JobGraphNode(node, job_instances_map)\n nodes_map[node.id] = new_node\n # check if it is root node\n try:\n node.relationships.next()\n except StopIteration:\n root_nodes.append(new_node)\n\n # then set relationships\n for _, child in nodes_map.iteritems():\n for relationship in child.cfy_node.relationships:\n parent = nodes_map[relationship.target_node.id]\n parent.add_child(child)\n child.add_parent(parent)\n\n return root_nodes, job_instances_map", "def to_NetworkX(nodes, edges, attributes=None):\n \n import networkx as nx\n # convert to dataframe if numpy array\n if isinstance(nodes, np.ndarray):\n nodes = coords_to_df(nodes)\n if isinstance(edges, np.ndarray):\n edges = pairs_to_df(edges)\n \n G = nx.from_pandas_edgelist(edges)\n if attributes is not 
None:\n for col in attributes.columns:\n # only for glm extension file:\n # nx.set_node_attributes(G, attributes[col].to_dict(), col.replace('+','AND')) \n nx.set_node_attributes(G, attributes[col].to_dict(), col)\n return G", "def __toNetworkX(self):\n G = nx.Graph()\n G.add_nodes_from(range(self.n))\n for u in range(self.n):\n for v in range(self.n):\n if self.adjacent(u, v):\n G.add_edge(u, v)\n\n return G", "def build_graph(self, graph, inst_name, port_nets):\n return", "def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n for nd1, nd2, attrs in G.edges(data=True):\n # G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n G_new.add_edge(str(nd1), str(nd2))\n\n return G_new", "def graph(self):\n graph = nx.DiGraph()\n for name, joint in self.joints.items():\n graph.add_edge(*joint.connects, joint=name)\n return graph", "def create_graph_with_nodes(src_nodes, get_id: callable, get_attrs: callable):\n graph = nx.MultiDiGraph()\n for node in src_nodes:\n graph.add_node(get_id(node), **get_attrs(node))\n return graph", "def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2", "def gen_graph(self):", "def network(self):\n G = nx.MultiDiGraph()\n reaction_hash = []\n product_count = 0\n mapping = {}\n reaction_count = 0\n\n for r in self.reactions:\n reaction_count += 1\n\n reaction_dict = r.__dict__\n G.add_edge(reaction_dict.get('left'), hash(r))\n G.add_edge(reaction_dict.get('right'), hash(r))\n G.add_edge(hash(r), reaction_dict.get('left2'))\n G.add_edge(hash(r), reaction_dict.get('right2'))\n\n product_count += 1\n mapping[reaction_dict.get('left')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('left2')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right2')] = \"x{}\".format(product_count)\n\n mapping[hash(r)] = \"r{}\".format(reaction_dict.get(\"reaction_n\"))\n reaction_hash.append(hash(r))\n\n return G, mapping", "def build_graph(self):\n for node in self.nodes:\n self.graph.add_node(node.id, node_obj=node)\n edges = []\n for i in range(0, len(self.nodes)):\n for j in range(i+1, len(self.nodes)):\n if (self.nodes[i].distance(self.nodes[j]) < self.radio_range):\n edges.append((self.nodes[i].id, self.nodes[j].id,1))\n self.graph.add_weighted_edges_from(edges)", "def from_onnx(self, graph):\n # parse network inputs, aka parameters\n for init_tensor in graph.initializer:\n if not init_tensor.name.strip():\n raise ValueError(\"Tensor's name is required.\")\n self._params[init_tensor.name] = self._parse_array(init_tensor)\n\n # converting GraphProto message\n for i in graph.input:\n if i.name in self._params:\n # i is a param instead of input\n name_param = 'param_{}'.format(self._num_param)\n self._num_param += 1\n self._params[name_param] = self._params.pop(i.name)\n self._nodes[name_param] = mx.sym.Variable(name=name_param,\n shape=self._params[name_param].shape)\n self._renames[i.name] = name_param\n else:\n name_input = 'input_{}'.format(self._num_input)\n self._num_input += 1\n self._nodes[name_input] = mx.sym.Variable(name=name_input)\n self._renames[i.name] = name_input\n\n # constructing nodes, nodes are stored as directed acyclic graph\n # converting NodeProto message\n for node in graph.node:\n op_name = node.op_type\n node_name = node.name.strip()\n node_name = node_name if 
node_name else None\n onnx_attr = self._parse_attr(node.attribute)\n new_op, mx_attr = _convert_operator(op_name, onnx_attr)\n inputs = [self._nodes[self._renames.get(i, i)] for i in node.input]\n\n # some workarounds for onnx problem\n mx_attr = self._fix_bias(new_op, mx_attr, len(inputs))\n mx_attr = self._fix_channels(new_op, mx_attr, list(node.input))\n self._fix_bias_shape(node.op_type, node.input, onnx_attr)\n\n # calling again to get new symbols after some workarounds\n inputs = [self._nodes[self._renames.get(i, i)] for i in node.input]\n\n # onnx's Gemm operator also supports broadcasting C input which\n # mxnet's equivalent linalg_gemm doesn't. So using combination of\n # transpose and FullyConnected operators.\n if op_name == 'Gemm':\n new_op, inputs, mx_attr = self._fix_gemm('FullyConnected', inputs, onnx_attr)\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(inputs, mx_attr)\n elif op_name == 'AveragePool' and onnx_attr.get('pads') is not None or \\\n op_name == 'MaxPool' and onnx_attr.get('pads') is not None:\n op = self._fix_pooling(op_name, inputs, onnx_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(inputs, mx_attr)\n else:\n op = new_op(name=node_name, *inputs, **mx_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n\n assert len(node_output) == len(op.list_outputs()), (\n \"Number of output mismatch {} vs {} in {}.\".format(\n len(node_output), len(op.list_outputs()), op_name))\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n # now return the outputs\n out = [self._nodes[i.name] for i in graph.output]\n if len(out) > 1:\n out = mx.sym.Group(out)\n else:\n out = out[0]\n return out, self._params", "def build_inference_graph(self):\n self.build_train_graph()", "def CreateGraph(Points, Edges):\n G = nx.Graph()\n newG, Nodes = CreateVertices(Points, G)\n Graph = CreateEdges(Nodes, Edges, newG)\n return Graph", "def build_graph(self):\n self.import_tree(ZOO_PATH, self.import_zoo, self.verify_zoos)\n self.import_tree(WILD_PATH, self.import_wild, self.verify_wilds)\n self.import_tree(PANDA_PATH, self.import_redpanda, self.verify_pandas)\n self.import_tree(MEDIA_PATH, self.import_media, self.verify_media)", "def build_2_node_graph(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_edge(1, 2)\n\n return graph", "def generate_dag_graph(self):\n # generate ranom graph\n G = nx.DiGraph()\n G.add_nodes_from(range(self.nodes))\n return self.fix_graph(G)", "def nx_graph_from_dot_file(dot_file_path):\n # this does not understand dot statements like X->Y,Z;\n # nx_graph = nx.nx_pydot.read_dot(dot_file_path)\n\n nodes, edges = DotTool.read_dot_file(dot_file_path)\n g = nx.DiGraph()\n g.add_edges_from(edges)\n\n return g", "def get(self):\n self.network = gt.load_graph(self.dotfile)\n\n if self.strongcomponent:\n self.network=gt.extract_largest_component(\n self.network, directed=True, prune=True)\n\n if self.removeselfloops:\n gt.remove_self_loops(self.network)\n\n self.nm = self.network.new_vertex_property(\"string\")\n nm2 = self.network.new_vertex_property(\"string\")\n self.hl = self.network.new_vertex_property(\"bool\")\n self.network.vertex_properties[\"text\"] = self.nm\n self.network.vertex_properties[\"text\"] = nm2\n names=[]\n for v in self.network.vertices():\n if v.out_degree() > -1:\n self.nm[v]=self.short_name(\n 
self.network.vp.vertex_name[v],self.preflen)\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.hl[v]=False\n else:\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.nm[v]=''\n self.hl[v]=False\n names=names+[nm2[v]]\n\n NAMES=pd.Series(list(set(names)),\n name='varclass').reset_index().set_index('varclass')\n self.varclass = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"varclass\"] = self.varclass\n for v in self.network.vertices():\n self.varclass[v]=NAMES.loc[nm2[v]].values[0]\n\n self.od = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.od\n for v in self.network.vertices():\n self.od[v]=self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=self.exponent)+5\n self.ods = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.ods\n for v in self.network.vertices():\n self.ods[v]=1*self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=1)+2\n\n self.ew = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight\"] = self.ew\n for e in self.network.edges():\n self.ew[e]=float(self.network.ep.weight[e])**1\n\n self.ew_pen = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight_pen\"] = self.ew_pen\n for e in self.network.edges():\n self.ew_pen[e]=4/(1 + np.exp(-.05-np.fabs(float(self.network.ep.weight[e]))))\n\n self.e_marker = self.network.new_edge_property(\"string\")\n self.network.edge_properties[\"e_marker\"] = self.e_marker\n for e in self.network.edges():\n if float(self.network.ep.weight[e]) < 0:\n self.e_marker[e]='bar'\n else:\n self.e_marker[e]='arrow'\n\n self.deg = self.network.degree_property_map(\"out\")\n\n self.ecol = self.network.new_edge_property(\"vector<double>\")\n self.network.edge_properties[\"ecol\"] = self.ecol\n for e in self.network.edges():\n col=cm.ScalarMappable(mpl.colors.Normalize(vmin=-self.edgecollim,\n vmax=self.edgecollim),\n cmap=self.edgecolmap).to_rgba(float(self.ew[e]))\n col=list(col)\n col[3]=self.edgealpha\n self.ecol[e]=tuple(col)\n\n self.pos = gt.graphviz_draw(self.network,\n overlap=False,\n vsize=20,\n sep=self.nodesep,\n output=None)\n\n self.control = self.network.new_edge_property(\"vector<double>\")\n for e in self.network.edges():\n d = np.sqrt(np.sum((self.pos[e.source()].a\n - self.pos[e.target()].a) ** 2))\n d=d/2\n self.control[e] = [0.0,0.0,0, .2*d, 0.5, d,1,0]\n\n if self.outfile is not None:\n gt.graph_draw(self.network,nodesfirst=False,\n pos=self.pos,\n vertex_halo=self.hl,\n vertex_halo_color=[.2,.2,.2,.1],\n edge_pen_width=self.ew_pen,\n edge_end_marker=self.e_marker,\n vorder=self.deg,\n edge_marker_size=10,\n vertex_color=self.varclass,#[.5,.5,.5,.3],\n edge_color=self.ecol,#[.5,.5,.5,.5],\n vertex_pen_width=1.5,\n vertex_size=self.od,\n vertex_text=self.nm,\n vcmap=(self.cmap,self.alpha),\n edge_control_points=self.control,\n vertex_fill_color=self.varclass,#deg,\n vertex_font_size=self.ods,\n vertex_text_color=[.1,.1,.1,.8],\n #vertex_text_position=0,\n output=self.outfile)", "def build_graph_from_triplets(num_nodes, num_rels, triplets):\n g = dgl.DGLGraph()\n g.add_nodes(num_nodes)\n src, rel, dst = triplets\n src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))\n rel = np.concatenate((rel, rel + num_rels))\n edges = sorted(zip(dst, src, rel))\n dst, src, rel = np.array(edges).transpose()\n g.add_edges(src, dst)\n norm = 
comp_deg_norm(g)\n return g, rel, norm", "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()", "def nx2gt(nxG):\n # Phase 0: Create a directed or undirected graph-tool Graph\n gtG = Graph(directed=nxG.is_directed())\n\n # Add the Graph properties as \"internal properties\"\n for key, value in list(nxG.graph.items()):\n # Convert the value and key into a type for graph-tool\n tname, value, key = get_prop_type(value, key)\n\n prop = gtG.new_graph_property(tname) # Create the PropertyMap\n \n gtG.graph_properties[key] = prop # Set the PropertyMap\n gtG.graph_properties[key] = value # Set the actual value\n\n # Phase 1: Add the vertex and edge property maps\n # Go through all nodes and edges and add seen properties\n # Add the node properties first\n nprops = set() # cache keys to only add properties once\n for node, data in nxG.nodes(data=True):\n\n # Go through all the properties if not seen and add them.\n for key, val in list(data.items()): \n if key in nprops: continue # Skip properties already added\n\n # Convert the value and key into a type for graph-tool\n tname, _, key = get_prop_type(val, key)\n\n prop = gtG.new_vertex_property(tname) # Create the PropertyMap\n gtG.vertex_properties[key] = prop # Set the PropertyMap\n\n # Add the key to the already seen properties\n nprops.add(key)\n\n # Also add the node id: in NetworkX a node can be any hashable type, but\n # in graph-tool node are defined as indices. 
So we capture any strings\n # in a special PropertyMap called 'id' -- modify as needed!\n gtG.vertex_properties['id'] = gtG.new_vertex_property('string')\n\n # Add the edge properties second\n eprops = set() # cache keys to only add properties once\n for src, dst, data in nxG.edges(data=True):\n\n # Go through all the edge properties if not seen and add them.\n for key, val in list(data.items()): \n if key in eprops: continue # Skip properties already added\n\n # Convert the value and key into a type for graph-tool\n tname, _, key = get_prop_type(val, key)\n \n prop = gtG.new_edge_property(tname) # Create the PropertyMap\n gtG.edge_properties[key] = prop # Set the PropertyMap\n\n # Add the key to the already seen properties\n eprops.add(key)\n\n # Phase 2: Actually add all the nodes and vertices with their properties\n # Add the nodes\n vertices = {} # vertex mapping for tracking edges later\n for node, data in nxG.nodes(data=True):\n\n # Create the vertex and annotate for our edges later\n v = gtG.add_vertex()\n vertices[node] = v\n\n # Set the vertex properties, not forgetting the id property\n data['id'] = str(node)\n for key, value in list(data.items()):\n gtG.vp[key][v] = value # vp is short for vertex_properties\n\n # Add the edges\n for src, dst, data in nxG.edges(data=True):\n\n # Look up the vertex structs from our vertices mapping and add edge.\n e = gtG.add_edge(vertices[src], vertices[dst])\n\n # Add the edge properties\n for key, value in list(data.items()):\n gtG.ep[key][e] = value # ep is short for edge_properties\n\n # Done, finally!\n return gtG", "def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def build_graph(num_accounts):\n\n all_requirements = []\n\n # Syslog account requirements\n syslog_account = 'syslog'\n all_requirements.append(CreateAccount('Create [Syslog]', syslog_account))\n all_requirements.append(AdminAccess('Admin Access [Syslog]', syslog_account))\n all_requirements.append(S3Bucket('S3 Bucket [Syslog]', syslog_account))\n all_requirements.append(SQSQueue('SQS Queue [Syslog]', syslog_account))\n\n # PDU account(s) requirements\n for n in range(1, 
num_accounts+1):\n pdu_account = \"PDU{}\".format(n)\n all_requirements.append(CreateAccount(\"Create [{}]\".format(pdu_account), pdu_account))\n all_requirements.append(AdminAccess(\"Admin Access [{}]\".format(pdu_account), pdu_account))\n all_requirements.append(CloudTrailSNSTopic(\"CloudTrail SNS [{}]\".format(pdu_account), pdu_account))\n all_requirements.append(CloudTrailTrail(\"CloudTrail Trail [{}]\".format(pdu_account), pdu_account))\n\n # Build graph based on each requirement's dependencies\n g = nx.DiGraph()\n for req in all_requirements:\n print(\"Adding node '{}'\".format(req))\n g.add_node(req)\n dependencies = req.get_dependencies(all_requirements)\n for dep in dependencies:\n print(\"Adding edge from '{}' to '{}'\".format(dep, req))\n g.add_edge(dep, req)\n return g", "def populate_graph(self):", "def create_graph(users, friend_counts):\n ###TODO-- Completed\n G = nx.Graph()\n\n #For Filtering the Nodes\n #print(friend_counts)\n friend_nodes = [friend for friend in friend_counts if friend_counts[friend] > 1]\n candidate_nodes = [user['screen_name'] for user in users]\n\n #print(\"Nodes: \",len(friend_nodes), len(candidate_nodes))\n #Adding Nodes to graph\n G.add_nodes_from(friend_nodes + candidate_nodes)\n\n #Connecting the Nodes with Edges\n for candidate in users:\n for friend in friend_nodes:\n if friend in candidate['friends']:\n G.add_edge(candidate['screen_name'], friend)\n\n return G", "def __graph__(self):\n\n graph = rdflib.Graph()\n for prefix, name in self.rml.namespaces():\n graph.namespace_manager.bind(prefix, name)\n return graph", "def as_graph(self, graph=None):\n # at this level it works but what if we have nested structures?\n # What is a graph if not a set of links? Why do not we put all into a graph?\n if not graph:\n graph = nx.Graph()\n\n for link in self.sequence:\n logging.info(link)\n (l, r) = link.value\n (ln, rn) = link.name\n logging.info (\"Node: %s %s \" % (l.name, str(l.shannon)))\n graph.add_node(l.name, shannon=l.shannon, IC=l.IC)\n logging.info (\"Node: %s %s \" % (r.name, str(r.shannon)))\n graph.add_node(r.name, shannon=r.shannon, IC=r.IC)\n logging.info (\"Edge: %s %s %s \" % (l.name, r.name, str(link.PMI)))\n graph.add_edge(l.name, r.name, pmi=link.PMI)\n\n return graph", "def build_graph(self, graph, inst_name, port_nets):\n self.add_graph_edges(graph, port_nets)", "def construct_graph(social_edges, spatial_edges, output_path=None):\n G = nx.DiGraph()\n with open(social_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n G.add_edge(USER_NODE_PREFIX + edge[0], USER_NODE_PREFIX + edge[-2], weight=float(edge[-1]))\n\n business_nodes = set([])\n with open(spatial_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n lat = float(edge[2])\n lng = float(edge[3])\n if edge[-2] not in business_nodes:\n G.add_node(BUSINESS_NODE_PREFIX + edge[-2], spatial={'lat': lat, 'lng': lng})\n business_nodes.add(edge[-2])\n\n with open(spatial_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n G.add_edge(USER_NODE_PREFIX + edge[0], BUSINESS_NODE_PREFIX + edge[-2], weight=float(edge[-1]))\n\n if output_path:\n pickle.dump(G, open(output_path, 'w'))\n return G", "def graph_from_dict(self,\n data,\n source_label,\n target_label,\n data_source='local',\n source_attributes=[],\n target_attributes=[]):\n G = nx.Graph()\n for row in data:\n if source_label not in row or target_label not in row:\n continue\n source = row[source_label]\n target = row[target_label]\n\n if source == \"\" 
or target == \"\":\n continue\n\n G.add_edge(source, target)\n G.edges[source, target][data_source] = row\n G.nodes[source][data_source] = {}\n G.nodes[target][data_source] = {}\n for attribute in source_attributes:\n G.nodes[source][data_source][attr] = data[attr]\n for attribute in target_attributes:\n G.nodes[target][data_source][attr] = data[attr]\n\n return G", "def __init__(self, variables, constraints):\n self.variables = variables\n self.constraints = constraints\n for c in constraints:\n c.var1.peers.append(c.var2)\n c.var2.peers.append(c.var1)", "def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))", "def prepare_graph(label, nodes, edges, graphID):\n features = {'label': label}\n\n G = nx.DiGraph()\n nodes[\"id\"] = nodes[\"id\"].apply(lambda x : str(x))\n features['num_nodes'] = nodes.shape[0]\n op_node = None\n times = []\n friends = []\n followers = []\n for index, row in nodes.iterrows():\n G.add_node(row['id'], time=row['time'], friends=row['friends'], followers = row['followers'])\n times.append(row['time'])\n friends.append(2**row['friends'])\n followers.append(2**row['followers'])\n if row['time'] == 0:\n features['poster_friend_cnt'] = 2**row['friends']\n features['poster_follower_cnt'] = 2**row['followers']\n tweeter_id = row['id']\n op_node = row['id']\n features['avg_time'] = np.mean(times)\n features['avg_friends'] = np.mean(friends)\n features['avg_followers'] = np.mean(followers)\n features['max_followers'] = max(followers)\n features['max_friends'] = max(friends)\n features['friends_25th_percentile'] = np.percentile(friends, 25)\n features['friends_75th_percentile'] = np.percentile(friends, 75)\n features['followers_25th_percentile'] = np.percentile(followers, 25)\n features['followers_75th_percentile'] = np.percentile(followers, 75)\n node_list = []\n edge_count = 0\n for pair in edges:\n node1, node2 = pair.split()[0], pair.split()[1]\n node_list.append(node1)\n node_list.append(node2)\n G.add_edge(node1, node2)\n edge_count += 1\n features['num_edges'] = edge_count\n sum_users_without_followers = sum([1 for (node, val) in G.in_degree() if val==0])\n features['ratio_users_w/out_followers'] = sum_users_without_followers / len(G.nodes)\n features['num_connected_components'] = nx.number_strongly_connected_components(G)\n features['number_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id)\n features['percentage_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id) / features['poster_follower_cnt']\n features['avg_clustering'] = nx.average_clustering(G)\n features['op_clustering'] = nx.clustering(G,op_node)\n features['transitivity'] = nx.transitivity(G)\n node_list = list(set(node_list))\n features['nodeID_list'] = np.array(node_list)\n features['graph_id'] = graphID\n return features, G", "def build_3_node_line_graph(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_node()\n\n graph.new_edge(1, 2)\n graph.new_edge(2, 
3)\n\n return graph", "def generate_regular_graph(variable_names, dist_func, num_neigh=10, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n num_neigh = min(num_neigh, num_vars-1)\n graphs = nx.random_graphs.random_regular_graph(num_neigh, num_vars)\n edges = np.array(graphs.edges())\n edges.sort(axis=-1)\n\n return graph_from_edges(variable_names, dist_func, edges)", "def make_join_graph(parsed_join_clauses):\n g = nx.Graph()\n for t1, c1, t2, c2 in parsed_join_clauses:\n g.add_node(t1)\n g.add_node(t2)\n if not g.has_edge(t1, t2):\n g.add_edge(t1, t2, join_columns={t1: [c1], t2: [c2]})\n else:\n edge = g[t1][t2]\n edge[\"join_columns\"][t1].append(c1)\n edge[\"join_columns\"][t2].append(c2)\n return g", "def nx_to_neo4j(nx_graph=None):\n\n if not nx_graph:\n nx_graph = json_to_nx(\"LegalKnowledgeGraph.json\")\n authenticate(ENV[\"DB_URL\"].replace(\"http://\", \"\"), ENV[\"DB_USERNAME\"],ENV[\"DB_PASSWORD\"]) # Accessing the NEO4J server\n neo4j_graph = Graph(ENV[\"DB_URL\"]+\"/db/data/\")\n string_to_instance_mapping = dict()\n\n list_node = list(nx_graph.nodes(data=True))\n for i in range(len(list_node)):\n node_instance = Node(list_node[i][1][\"type\"], id=list_node[i][0])\n string_to_instance_mapping[list_node[i][0]] = node_instance\n\n list_edges=list(nx_graph.edges())\n for i in range(nx_graph.number_of_edges()):\n source_node_instance = string_to_instance_mapping[list_edges[i][0]]\n target_node_instance = string_to_instance_mapping[list_edges[i][1]]\n b = Relationship(source_node_instance, \"MAPS TO\", target_node_instance)\n neo4j_graph.create(b)", "def reconstruct_graph(inputs, outputs, tag=None):\r\n if tag is None:\r\n tag = ''\r\n nw_inputs = [safe_new(x, tag) for x in inputs]\r\n givens = OrderedDict()\r\n for nw_x, x in izip(nw_inputs, inputs):\r\n givens[x] = nw_x\r\n allinputs = theano.gof.graph.inputs(outputs)\r\n for inp in allinputs:\r\n if isinstance(inp, theano.Constant):\r\n givens[inp] = inp.clone()\r\n\r\n nw_outputs = clone(outputs, replace=givens)\r\n return (nw_inputs, nw_outputs)", "def graph(pkgs: Dict[str, BinaryPackage]) -> nx.DiGraph:\n # To get build dependencies from a source package\n build_deps = {}\n # To get the source package for a binary package\n src_pkgs = {}\n # To get the directory for a source package\n directories = {}\n\n # Build maps mentioned above\n for _, pkg in pkgs.items():\n build_deps[pkg.source] = pkg.build_deps\n src_pkgs[pkg.name] = pkg.source\n directories[pkg.source] = pkg.dir\n\n # Convert binary build dependencies to local directories\n for src, d in directories.items():\n src_deps = []\n for dep in build_deps[src]:\n if dep in src_pkgs:\n src_deps.append(directories[src_pkgs[dep]])\n build_deps[src] = src_deps\n\n # Create graph from build dependencies\n dep_graph = nx.DiGraph()\n for src, d in directories.items():\n dep_graph.add_node(d)\n dep_graph.add_nodes_from(build_deps[src])\n for dep in build_deps[src]:\n dep_graph.add_edge(d, dep)\n\n return dep_graph", "def __init__(self, variables, constraints):\n self.__variables = variables\n self.__constraints = constraints\n\n self.__make_node_consistent()", "def _CreateAdjacencyListGraph(self):\n graph = dict()\n for nodes in self.nodes:\n graph[nodes[0]] = set()\n for edges in self.edges:\n graph[edges[0]].add(edges[1])\n return graph", "def _create_reference_connectivity_graph(self):\n #take the self._residue_graph and create a replicate (without the misc attributes) with the atoms_with_positions\n _reference_connectivity_graph = nx.Graph()\n 
atoms_with_positions = set(self._atoms_with_positions)\n\n #iterate over all the bonds\n for bond in self._residue_graph.edges():\n if set(bond).issubset(atoms_with_positions):\n #if both of the atoms in the bond are in atoms_with_positions, we can add the atoms/bonds to the reference\n _reference_connectivity_graph.add_edge(*bond)\n\n return _reference_connectivity_graph", "def build_graph(self):\n if self.model == 'dense':\n # ForecastNet with two densely connected hidden layers in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv':\n # ForecastNet with a convlutional neural network in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_conv_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'dense2':\n # ForecastNet with two densely connected hidden layers in a cell and linear outputs\n self.outputs, self.cost = forecastnet_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv2':\n # ForecastNet with a convolutional neural network in a cell and linear outputs\n self.outputs, self.cost = forecastnet_conv_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)", "def build_graph(self, objects_: Union[Dict[str, List[Union[str, dict, int]]], NamedTuple],\n root_name: str, *branch_names: str) -> list:\n\n all_nodes = []\n for key, value in objects_.items():\n for index, items in enumerate(value):\n if isinstance(items, dict):\n vertices = self.initialize_vertices(objects_=items,\n root_name=root_name,\n independent=False,\n group=index)\n all_nodes.extend(vertices)\n continue\n\n root = set()\n\n for vertex in all_nodes:\n if vertex[root_name]:\n root.add(vertex[root_name])\n\n for branch in branch_names:\n if not isinstance(branch, str):\n raise TypeError(f\"Expected a string, got {type(branch)}\")\n\n try:\n if vertex[branch]:\n branches = self.locate_graph_link(vertex_list=all_nodes,\n branch_name=branch)\n except KeyError:\n continue\n\n share = self.initialize_connection(branch_iterable=branches, root_iterable=root)\n\n return self.find_nodes_subtype(root_name)", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def build_graph(self):\n return nn.Sequential(\n nn.Linear(self.input_dim, self.hidden_dim),\n self.hidden_activation,\n nn.Linear(self.hidden_dim, self.n_classes_))", "def __init__(self, network: Network):\n self.graph = network.graph", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def display_graph(variables, relations):\n graph = as_networkx_graph(variables, relations)\n\n # Do not crash if matplotlib is not installed\n try:\n import matplotlib.pyplot as plt\n\n nx.draw_networkx(graph, with_labels=True)\n # nx.draw_random(graph)\n # nx.draw_circular(graph)\n # nx.draw_spectral(graph)\n plt.show()\n 
except ImportError:\n print(\"ERROR: cannot display graph, matplotlib is not installed\")", "def __build_nodes(self):\n self.components = {}\n\n for node in self.get_nodes():\n # Create the node\n assert node not in self.components, \"Node %s already exists\" % node.name\n self.components[node] = Node(name=node,\n node=self.graph.nodes[node],\n temperature_driven=self.temperature_driven,\n repr_days=self.repr_days)\n # Add the new components\n self.components.update(self.components[node].get_components())", "def create_computational_graph(node):\n graph = CompGraph()\n graph.build_graph(node)\n return graph", "def build_single_node_graph(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n graph.new_node()\n\n return graph", "def get_dependency_graph(self, zero_index=False):\n directed_graph = nx.DiGraph()\n construction_objects = self.steps\n if zero_index:\n labels = range(len(construction_objects))\n else:\n labels = range(1, len(construction_objects) + 1)\n object_labels = dict(map(lambda x, y: (x, y), construction_objects, labels))\n\n directed_graph.add_nodes_from(object_labels.values())\n for shape in construction_objects:\n for dependency in shape.dependencies:\n directed_graph.add_edge(object_labels[dependency], object_labels[shape])\n\n return object_labels, directed_graph", "def build_graph(self):\n self._reset_iterator_memory()\n self._construct_graph_handler()\n assert self.graph_handler\n for rxn_id in self.graph_handler.get_valid_reaction_ids():\n rxn = db.Reaction(rxn_id, self._reactions)\n self.graph_handler.add_rxn(rxn)", "def generate_networkx_graphs(raw_graphs):\n\n source_graphs = [source_from_raw(raw) for raw in raw_graphs]\n target_graphs = [target_from_raw(raw) for raw in raw_graphs]\n\n return source_graphs, target_graphs", "def make_dag(self, expand=set()):\n G = nx.DiGraph()\n\n ## Inputs-to-Functions\n for f in self.functions:\n # Expand composed models\n if isinstance(f, FunctionModel) and (f.name in expand):\n G_ref = f.model.make_dag(expand=expand - {f})\n G_sub = nx.DiGraph()\n # Add nodes\n G_sub.add_node(f.name + \".var\")\n G_sub.add_node(f.name + \".out\")\n for g in f.model.functions:\n G_sub.add_node(f.name + \".\" + g.name)\n # Add node metadata\n nx.set_node_attributes(G_sub, f.name, \"parent\")\n\n # Add edges\n for u, v, d in G_ref.edges(data=True):\n # Add renamed edge\n if u == \"(var)\":\n G_sub.add_edge(f.name + \".var\", f.name + \".\" + v, **d)\n elif v == \"(out)\":\n G_sub.add_edge(f.name + \".\" + u, f.name + \".out\", **d)\n else:\n G_sub.add_edge(f.name + \".\" + u, f.name + \".\" + v, **d)\n\n # Compose the graphs\n G = nx.compose(G, G_sub)\n\n i_var = set(self.var).intersection(set(f.var))\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(\"(var)\", f.name + \".var\", label=s_var)\n else:\n G.add_edge(\"(var)\", f.name, label=s_var)\n\n ## Function-to-Function\n for i0 in range(len(self.functions)):\n for i1 in range(i0 + 1, len(self.functions)):\n f0 = self.functions[i0]\n f1 = self.functions[i1]\n i_var = set(f0.out).intersection(set(f1.var))\n\n ## If connected\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n ## Handle composed models\n if isinstance(f0, FunctionModel) and (f0.name in expand):\n name0 = f0.name + \".out\"\n else:\n name0 = f0.name\n if isinstance(f1, FunctionModel) and (f1.name in expand):\n name1 = f1.name + \".out\"\n else:\n name1 = f1.name\n\n G.add_edge(name0, name1, label=s_var)\n\n ## 
Functions-to-Outputs\n for f in self.functions:\n i_out = set(self.out).intersection(set(f.out))\n\n if len(i_out) > 0:\n s_out = \"{}\".format(i_out)\n ## Target composed model's out\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(f.name + \".out\", \"(out)\", label=s_out)\n ## An ordinary function\n else:\n G.add_edge(f.name, \"(out)\", label=s_out)\n\n # Add node metadata\n nx.set_node_attributes(G, {f.name: {\"parent\": self.name}})\n\n # Final metadata\n nx.set_node_attributes(G, {\"(var)\": {\"parent\": self.name}})\n nx.set_node_attributes(G, {\"(out)\": {\"parent\": self.name}})\n\n return G", "def create_directed_graph(coupling_coefs, weighted=False):\n weight_matrix = coupling_coefs_to_weight_matrix(coupling_coefs)\n\n if not weighted:\n weight_matrix = (weight_matrix != 0).astype('int')\n\n G = nx.convert_matrix.from_numpy_matrix(\n weight_matrix,\n create_using=nx.DiGraph()\n )\n return G", "def to_networkx(self, max_vertices: int = 5000) -> nx.Graph:\n graph_nx = nx.Graph()\n for v in self._vertices.values():\n graph_nx.add_node(v.item, kind=v.kind)\n\n for u in v.neighbours:\n if graph_nx.number_of_nodes() < max_vertices:\n graph_nx.add_node(u.item, kind=u.kind)\n\n if u.item in graph_nx.nodes:\n graph_nx.add_edge(v.item, u.item)\n\n if graph_nx.number_of_nodes() >= max_vertices:\n break\n\n return graph_nx", "def get_graph(self, with_fix=False):\n nodes = self.get_tasks()\n if with_fix:\n for n in nodes:\n n.fix_arguments()\n deps = self.get_dependencies()\n graph = nx.DiGraph()\n graph.add_nodes_from(nodes)\n graph.add_edges_from( [d.edge() for d in deps] )\n return graph", "def construct_network_from_neighbours_list(related_characters: list):\n graph = nx.Graph()\n for edge in related_characters:\n sentiment = edge[1]\n color = ''\n if sentiment == 'Positive':\n color = 'g'\n elif sentiment == 'Negative':\n color = 'r'\n elif sentiment == 'Neutral':\n color = 'k'\n # graph.add_node(edge[0][0], popularity=\n graph.add_edge(edge[0][0], edge[0][1], color=color, weight=edge[2])\n\n return graph", "def tree_graph(self, parents: dict) -> nx.DiGraph:\n tree_graph = nx.DiGraph(parents)\n for parent, child in tree_graph.edges():\n tree_graph.edges[parent, child][\"weight\"] = self.a[child]\n return tree_graph", "def gen_network(self):\n di = nx.DiGraph()\n di.add_edges_from(self.network_edges())\n di.add_nodes_from(self.network_nodes())\n self.network = di\n self.highlight_cycles()\n return self", "def get_network(self):\n\n # Find which nodes are input and which are output. 
We may want to store\n # this info somewhere else (like in the genome)\n\n inputs = []\n outputs = []\n bias = []\n edges = []\n node_num = dict() #Map from node_id to zero index node number\n\n for i, node in enumerate(self.node_genes):\n # Create mapping\n node_num[node.node_id] = i\n\n # Store input and output node_numbers\n if node.node_type is INPUT:\n inputs.append(i)\n elif node.node_type is OUTPUT:\n outputs.append(i)\n elif node.node_type is BIAS:\n bias.append(i)\n\n # Create edge list.\n for link in self.link_genes:\n if link.enabled:\n edges.append((node_num[link.to_node.node_id],\n node_num[link.from_node.node_id], link.weight))\n\n\n # Build an adjacency matrix for the network\n n = len(node_num)\n adj_matrix = np.zeros((n, n))\n try:\n for e in edges:\n adj_matrix[e[:2]] = e[2]\n except:\n global GENOME\n GENOME = self\n print([node.node_id for node in self.node_genes])\n print()\n print('len(node_genes)', len(self.node_genes))\n print('edge', e)\n print('adj.shape', adj_matrix.shape)\n sys.exit()\n\n return Network(adj_matrix, inputs, outputs, bias)", "def build_auxiliary_node_connectivity(G):\n directed = G.is_directed()\n\n mapping = {}\n H = nx.DiGraph()\n\n for i, node in enumerate(G):\n mapping[node] = i\n H.add_node('%dA' % i, id=node)\n H.add_node('%dB' % i, id=node)\n H.add_edge('%dA' % i, '%dB' % i, capacity=1)\n\n edges = []\n for (source, target) in G.edges():\n edges.append(('%sB' % mapping[source], '%sA' % mapping[target]))\n if not directed:\n edges.append(('%sB' % mapping[target], '%sA' % mapping[source]))\n H.add_edges_from(edges, capacity=1)\n\n # Store mapping as graph attribute\n H.graph['mapping'] = mapping\n return H", "def generate(self):\n\n g = nx.Graph()\n g.add_nodes_from(self.graph.nodes)\n\n num_nodes = g.number_of_nodes()\n\n degree_sequence = sorted([d for n, d in self.graph.degree()])\n degree_count = Counter(degree_sequence)\n deg, cnt = zip(*degree_count.items())\n\n degree_probs = [c / sum(cnt) for c in cnt]\n\n for i in range(num_nodes):\n num_edges = np.random.choice(a=deg, p=degree_probs) - g.degree[i]\n\n if num_edges > 0:\n ranking = self.ranker.get_ranking(i)\n probs = get_rank_probabilities(len(ranking))\n target_nodes = np.random.choice(a=ranking, p=probs, size=num_edges, replace=False)\n\n for j in target_nodes:\n g.add_edge(i, j)\n\n return g", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. 
Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict" ]
[ "0.73159355", "0.67514896", "0.6639457", "0.663262", "0.6631219", "0.6617338", "0.65870404", "0.6585843", "0.65561634", "0.64985657", "0.64818686", "0.64802456", "0.64401877", "0.64243275", "0.64049304", "0.6382079", "0.63409054", "0.6319503", "0.6298616", "0.6286521", "0.62707406", "0.6231678", "0.6230883", "0.6181396", "0.6176297", "0.61700135", "0.6168684", "0.61603206", "0.6153772", "0.6134785", "0.61217993", "0.61090463", "0.61058956", "0.6104658", "0.6091806", "0.6090437", "0.60807383", "0.60774815", "0.6074372", "0.6072654", "0.6043911", "0.6037814", "0.6033446", "0.60258394", "0.6019964", "0.60147357", "0.60109526", "0.6010689", "0.60099864", "0.59944755", "0.5989729", "0.5953173", "0.59530836", "0.59330446", "0.59032923", "0.5901917", "0.5892119", "0.5877367", "0.5846533", "0.58459836", "0.5839762", "0.58392954", "0.5837847", "0.5830859", "0.5826497", "0.5816439", "0.57995915", "0.57966924", "0.57956165", "0.5788202", "0.5783869", "0.57808226", "0.57716376", "0.5770386", "0.57701904", "0.57692194", "0.5766506", "0.5764557", "0.5758368", "0.57548875", "0.57523155", "0.57497585", "0.5736046", "0.57315683", "0.57219154", "0.5719993", "0.5717971", "0.57154477", "0.57148045", "0.5712829", "0.5711958", "0.5706526", "0.5695831", "0.5695822", "0.56952244", "0.5693365", "0.56847566", "0.56834924", "0.56692845", "0.5669213" ]
0.81062376
0
Build a networkx graph object from variables and relations.
def as_networkx_bipartite_graph(variables, relations):
    graph = nx.Graph()

    # One node for each variables
    graph.add_nodes_from([v.name for v in variables], bipartite=0)
    graph.add_nodes_from([r.name for r in relations], bipartite=1)

    for r in relations:
        for e in r.dimensions:
            graph.add_edge(r.name, e.name)
    return graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_networkx_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables])\n\n for r in relations:\n for p in all_pairs([e.name for e in r.dimensions]):\n graph.add_edge(*p)\n return graph", "def initialize_graph(compound_relations, relation_types):\n graph = nx.DiGraph()\n for compound, targets in compound_relations.items():\n for target, relation in targets.items():\n if relation in relation_types:\n graph.add_edge(compound, target)\n return graph", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def build_graph(edges):\n \n G = nx.MultiGraph()\n G.add_edges_from(edges)\n return G", "def parse_graph(self):\n\t\tnx_graph = nx.Graph()\n\t\tfor node in self.vertices:\n\t\t\tnx_graph.add_node(node)\n\n\t\tfor edge in self.edges:\n\t\t\tnode1, node2, weight = edge\n\t\t\tnx_graph.add_edge(node1, node2, weight=weight)\n\n\t\treturn nx_graph", "def build_graph(self):\n pass", "def to_networkx(self):\n g = nx.Graph()\n for v in self.vs.values():\n g.add_node(v)\n for v in self.fs:\n g.add_node(v)\n for u in v.neighbors:\n g.add_edge(v, u)\n return g", "def _build_graph(self):\n pass", "def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def build_graph(graph_dict): \n #make networkX graph\n G = nx.Graph()\n G.add_nodes_from(graph_dict.keys())\n for key in graph_dict:\n for i in range(len(graph_dict[key])):\n G.add_edge(key,graph_dict[key][i])\n return G", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n 
current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g", "def _create_nx_graph(self):\n #_graph = nx.Graph()\n graph = nx.DiGraph()\n for name, lemma in self._lemmas_info.get_parent_lemmas():\n added_children = []\n for child_n in lemma.evidence_lemmas:\n child_node = str(child_n)\n if not self._should_be_filtered( added_children, child_node ):\n added_children.append( child_node )\n \n graph.add_node( name ) # it's OK if it exists from the previous iteration\n graph.add_node( child_node )\n # lemma1 because lemma2, means that lemma2 -> lemma1\n graph.add_edge( child_node, name )\n \n self._append_source_and_target( graph )\n return graph", "def build_graph(self):\n raise NotImplementedError", "def _get_graph(nodes: Nodes, edges: np.ndarray):\n\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n\n return graph", "def get_graph(self) -> nx.classes.graph.Graph:\n G = nx.Graph()\n # add nodes\n G.add_nodes_from([(room, props) for room, props in self.get_rooms_with_properties().items()])\n # add edges\n G.add_edges_from(self.get_edges_with_properties())\n return G", "def _build_graph(show=False):\n global G\n G = nx.Graph()\n node_labels, edge_labels = {}, {}\n for idx, dep in enumerate(A.deps):\n\n types = [\"dependent\", \"governor\"]\n\n # nodes, labels\n for x in types:\n G.add_node(str(dep[x]), word=dep[x + \"Gloss\"], pos=A.lookup[dep[x]][\"pos\"])\n node_labels[str(dep[x])] = dep[x + \"Gloss\"] + \" : \" + A.lookup[dep[x]][\"pos\"]\n\n # edges, labels\n G.add_edge(str(dep[types[0]]), str(dep[types[1]]), dep=dep[\"dep\"])\n edge_labels[(str(dep[types[0]]), str(dep[types[1]]))] = dep[\"dep\"]\n\n if show == True:\n pos = nx.spring_layout(G)\n nx.draw_networkx(G, pos=pos, labels=node_labels, node_color=\"white\", alpha=.5)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()", "def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])", "def build_graph(self):\n self.graph = tf.Graph()\n temp_connections = self.connections\n with self.graph.as_default():\n operations = {}\n\n # create Variables for input vertices\n for neuron_id in self.input_neurons:\n self.inputs[neuron_id] = tf.get_variable(name=str(neuron_id), shape=(),\n initializer=tf.zeros_initializer)\n deletion = []\n while len(temp_connections) > 0:\n for neuron_id in deletion:\n temp_connections.pop(neuron_id, None)\n deletion = []\n keys = list(temp_connections)\n random.shuffle(keys)\n # create input & output vertices\n for neuron_id in temp_connections:\n input_neuron_ids = temp_connections[neuron_id]\n if self.check(input_neuron_ids, operations):\n # weights\n v_weights = tf.constant(self.weights[neuron_id])\n # input vertices\n v_inputs = []\n\n for input_neuron_id in input_neuron_ids:\n if self.is_input_neuron(input_neuron_id):\n vertex = self.inputs[input_neuron_id]\n else:\n vertex = operations[input_neuron_id]\n\n v_inputs.append(vertex)\n\n deletion.append(neuron_id)\n\n # multiply weights and inputs\n mul = tf.multiply(v_inputs, v_weights, str(neuron_id))\n # sum multiplied values\n sum = 
tf.reduce_sum(mul, name='sum_' + str(neuron_id))\n # apply activation function\n if self.is_output_neuron(neuron_id):\n activation = tf.sigmoid(sum, name=\"output\")\n else:\n activation = tf.nn.leaky_relu(sum, alpha=0.2, name=\"relu_\" + str(neuron_id))\n\n operations[neuron_id] = activation\n if self.is_output_neuron(neuron_id):\n self.output = activation\n return self.graph, self.inputs, self.output", "def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n# G_new.add_node(str(nd), x=str(attrs['attributes'][0]),\n# y=str(attrs['attributes'][1]))\n for nd1, nd2, attrs in G.edges(data=True):\n G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n# G_new.add_edge(str(nd1), str(nd2))\n\n return G_new", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def _make_graph(nodes, ways):\n graph = networkx.MultiDiGraph(crs=\"EPSG:4326\")\n ways_proj = ways.set_crs(\"EPSG:4326\").to_crs(\"EPSG:3395\")\n\n for node_id, node_attr in nodes.rename(columns={'longitude': 'x', 'latitude': 'y'}).iterrows():\n graph.add_node(node_id, **node_attr)\n\n for _, way in ways_proj.iterrows():\n\n osm_oneway_values = [\"yes\", \"true\", \"1\", \"-1\", \"T\", \"F\"]\n if \"oneway\" in way and way.oneway in osm_oneway_values:\n if way[\"oneway\"] == \"-1\" or way[\"oneway\"] == \"T\":\n # paths with a one-way value of -1 or T are one-way, but in the\n # reverse direction of the nodes' order, see osm documentation\n path_nodes = list(reversed(way.nodes))\n else:\n path_nodes = way.nodes\n # add this path (in only one direction) to the graph\n one_way = True\n\n elif \"junction\" in way and way.junction == \"roundabout\":\n # roundabout are also oneway but not tagged as is\n path_nodes = way.nodes\n one_way = True\n\n # else, this path is not tagged as one-way or it is a walking network\n # (you can walk both directions on a one-way street)\n else:\n # add this path (in both directions) to the graph and set its\n # 'oneway' attribute to False. 
if this is a walking network, this\n # may very well be a one-way street (as cars/bikes go), but in a\n # walking-only network it is a bi-directional edge\n path_nodes = way.nodes\n one_way = False\n\n # zip together the path nodes so you get tuples like (0,1), (1,2), (2,3)\n # and so on\n path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))\n graph.add_edges_from(path_edges, **way[['id']])\n if not one_way:\n path_edges_reverse = [(v, u) for u, v in path_edges]\n graph.add_edges_from(path_edges_reverse, **way[['id']])\n\n graph = osmnx.utils_graph.add_edge_lengths(graph)\n return graph", "def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G", "def _construct_graph(self):\n raise NotImplementedError", "def build_graph(friends: list, mutuals: dict) -> nx.classes.graph.Graph:\n friends_ids = [friend['id'] for friend in friends]\n G = nx.Graph()\n G.add_nodes_from(range(len(friends_ids)))\n\n for idx in tqdm(friends_ids):\n node_id = friends_ids.index(idx)\n G.nodes[node_id]['vk_id'] = idx\n G.nodes[node_id]['first_name'] = friends[node_id]['first_name']\n G.nodes[node_id]['last_name'] = friends[node_id]['last_name']\n G.nodes[node_id]['gender'] = friends[node_id]['sex']\n G.nodes[node_id]['relation'] = friends[node_id].get('relation')\n G.nodes[node_id]['city'] = friends[node_id].get('city', {}).get('title')\n G.nodes[node_id]['country'] = friends[node_id].get('country', {}).get('title')\n G.nodes[node_id]['schools'] = friends[node_id].get('schools')\n G.nodes[node_id]['universities'] = friends[node_id].get('universities')\n G.nodes[node_id]['career'] = friends[node_id].get('career')\n idx_mutuals = mutuals.get(idx)\n if idx_mutuals != None:\n edges = [(node_id, friends_ids.index(friend_id)) for friend_id in idx_mutuals]\n G.add_edges_from(edges)\n\n return G", "def _build_dependency_graph(self):\n\n #\n # Find the binary roles\n #\n nodes, roles = self._find_roles()\n\n #\n # Build the graph\n #\n working_list = list(set(nodes.keys()))\n\n setters = [b for b, r in roles.items() if Role.SETTER in r or Role.SETTER_GETTER in r]\n\n while working_list:\n b = working_list[0]\n working_list = working_list[1:]\n\n if nodes[b] 
not in self._graph:\n self._graph[nodes[b]] = []\n\n # it's a root node\n if Role.GETTER not in roles[b] and Role.SETTER_GETTER not in roles[b]:\n nodes[b].set_root()\n\n # takes params from some other binary\n else:\n is_orphan = True\n for setter in setters:\n setter_strings_set = set(nodes[setter].role_strings)\n node_strings_set = set(nodes[b].role_strings)\n if setter_strings_set.intersection(node_strings_set):\n if nodes[setter] not in self._graph:\n self._graph[nodes[setter]] = []\n self._graph[nodes[setter]].append(nodes[b])\n is_orphan = False\n\n # mark orphans\n if is_orphan:\n nodes[b].set_orphan()\n\n # Clean up\n for k, childs in self._graph.iteritems():\n self._graph[k] = list(set(childs))\n\n # set leaves\n for k, c in self._graph.iteritems():\n if not c:\n k.set_leaf()\n\n # post processing:\n # remove those nodes that are not orphans\n # and are not network parsers\n\n nodes = self.nodes\n children = [c for x in self._graph.values() for c in x if x]\n leafs_non_orphan = [n for n in nodes if n.leaf and not n.orphan]\n seed_names = [x.split('/')[-1] for x in self._seed_bins]\n spurious_nodes = [n for n in leafs_non_orphan if n not in children and n.bin.split('/')[-1] not in seed_names]\n for to_rem in spurious_nodes:\n del self._graph[to_rem]", "def build_graph(self, nodes, edges):\n\n log.info(\"Building Graph with [%s] nodes and [%s] edges\" % ('NOT_IMPLEMENTED', 'NOT_IMPLEMENTED'))\n\n for node, node_type in nodes.items():\n self.graph.node(node, node.replace('tmp_git_repo/', '', 1), color=NODE_COLORS[node_type])\n\n for left_edge, right_edges in edges.items():\n for right_edge in right_edges:\n self.graph.edge(left_edge, right_edge.import_path, label=right_edge.import_name)", "def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)", "def _create_graph(netlist):\n G = nx.Graph()\n for t in netlist:\n G.add_edges_from([(t.name, t.drain), (t.name, t.gate), (t.name, t.source)])\n return G", "def buildGraph(self):\r\n\r\n print 'Building graph...'\r\n\r\n self.buildQ()\r\n self.buildP()\r\n self.buildReconstructionTerm()\r\n self.buildConditionalPriorTerm()\r\n self.buildWPriorTerm()\r\n self.buildZPriorTerm()\r\n\r\n self.buildObjective()\r\n self.buildGrad()", "def build_graph(nodes):\n\n job_instances_map = {}\n\n # first create node structure\n nodes_map = {}\n root_nodes = []\n for node in nodes:\n new_node = JobGraphNode(node, job_instances_map)\n nodes_map[node.id] = new_node\n # check if it is root node\n try:\n node.relationships.next()\n except StopIteration:\n root_nodes.append(new_node)\n\n # then set relationships\n for _, child in nodes_map.iteritems():\n for relationship in child.cfy_node.relationships:\n parent = nodes_map[relationship.target_node.id]\n parent.add_child(child)\n child.add_parent(parent)\n\n return root_nodes, job_instances_map", "def to_NetworkX(nodes, edges, attributes=None):\n \n import networkx as nx\n # convert to dataframe if numpy array\n if isinstance(nodes, np.ndarray):\n nodes = coords_to_df(nodes)\n if isinstance(edges, np.ndarray):\n edges = pairs_to_df(edges)\n \n G = nx.from_pandas_edgelist(edges)\n if attributes is not 
None:\n for col in attributes.columns:\n # only for glm extension file:\n # nx.set_node_attributes(G, attributes[col].to_dict(), col.replace('+','AND')) \n nx.set_node_attributes(G, attributes[col].to_dict(), col)\n return G", "def __toNetworkX(self):\n G = nx.Graph()\n G.add_nodes_from(range(self.n))\n for u in range(self.n):\n for v in range(self.n):\n if self.adjacent(u, v):\n G.add_edge(u, v)\n\n return G", "def build_graph(self, graph, inst_name, port_nets):\n return", "def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n for nd1, nd2, attrs in G.edges(data=True):\n # G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n G_new.add_edge(str(nd1), str(nd2))\n\n return G_new", "def graph(self):\n graph = nx.DiGraph()\n for name, joint in self.joints.items():\n graph.add_edge(*joint.connects, joint=name)\n return graph", "def create_graph_with_nodes(src_nodes, get_id: callable, get_attrs: callable):\n graph = nx.MultiDiGraph()\n for node in src_nodes:\n graph.add_node(get_id(node), **get_attrs(node))\n return graph", "def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2", "def gen_graph(self):", "def network(self):\n G = nx.MultiDiGraph()\n reaction_hash = []\n product_count = 0\n mapping = {}\n reaction_count = 0\n\n for r in self.reactions:\n reaction_count += 1\n\n reaction_dict = r.__dict__\n G.add_edge(reaction_dict.get('left'), hash(r))\n G.add_edge(reaction_dict.get('right'), hash(r))\n G.add_edge(hash(r), reaction_dict.get('left2'))\n G.add_edge(hash(r), reaction_dict.get('right2'))\n\n product_count += 1\n mapping[reaction_dict.get('left')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('left2')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right2')] = \"x{}\".format(product_count)\n\n mapping[hash(r)] = \"r{}\".format(reaction_dict.get(\"reaction_n\"))\n reaction_hash.append(hash(r))\n\n return G, mapping", "def build_graph(self):\n for node in self.nodes:\n self.graph.add_node(node.id, node_obj=node)\n edges = []\n for i in range(0, len(self.nodes)):\n for j in range(i+1, len(self.nodes)):\n if (self.nodes[i].distance(self.nodes[j]) < self.radio_range):\n edges.append((self.nodes[i].id, self.nodes[j].id,1))\n self.graph.add_weighted_edges_from(edges)", "def from_onnx(self, graph):\n # parse network inputs, aka parameters\n for init_tensor in graph.initializer:\n if not init_tensor.name.strip():\n raise ValueError(\"Tensor's name is required.\")\n self._params[init_tensor.name] = self._parse_array(init_tensor)\n\n # converting GraphProto message\n for i in graph.input:\n if i.name in self._params:\n # i is a param instead of input\n name_param = 'param_{}'.format(self._num_param)\n self._num_param += 1\n self._params[name_param] = self._params.pop(i.name)\n self._nodes[name_param] = mx.sym.Variable(name=name_param,\n shape=self._params[name_param].shape)\n self._renames[i.name] = name_param\n else:\n name_input = 'input_{}'.format(self._num_input)\n self._num_input += 1\n self._nodes[name_input] = mx.sym.Variable(name=name_input)\n self._renames[i.name] = name_input\n\n # constructing nodes, nodes are stored as directed acyclic graph\n # converting NodeProto message\n for node in graph.node:\n op_name = node.op_type\n node_name = node.name.strip()\n node_name = node_name if 
node_name else None\n onnx_attr = self._parse_attr(node.attribute)\n new_op, mx_attr = _convert_operator(op_name, onnx_attr)\n inputs = [self._nodes[self._renames.get(i, i)] for i in node.input]\n\n # some workarounds for onnx problem\n mx_attr = self._fix_bias(new_op, mx_attr, len(inputs))\n mx_attr = self._fix_channels(new_op, mx_attr, list(node.input))\n self._fix_bias_shape(node.op_type, node.input, onnx_attr)\n\n # calling again to get new symbols after some workarounds\n inputs = [self._nodes[self._renames.get(i, i)] for i in node.input]\n\n # onnx's Gemm operator also supports broadcasting C input which\n # mxnet's equivalent linalg_gemm doesn't. So using combination of\n # transpose and FullyConnected operators.\n if op_name == 'Gemm':\n new_op, inputs, mx_attr = self._fix_gemm('FullyConnected', inputs, onnx_attr)\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(inputs, mx_attr)\n elif op_name == 'AveragePool' and onnx_attr.get('pads') is not None or \\\n op_name == 'MaxPool' and onnx_attr.get('pads') is not None:\n op = self._fix_pooling(op_name, inputs, onnx_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(inputs, mx_attr)\n else:\n op = new_op(name=node_name, *inputs, **mx_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n\n assert len(node_output) == len(op.list_outputs()), (\n \"Number of output mismatch {} vs {} in {}.\".format(\n len(node_output), len(op.list_outputs()), op_name))\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n # now return the outputs\n out = [self._nodes[i.name] for i in graph.output]\n if len(out) > 1:\n out = mx.sym.Group(out)\n else:\n out = out[0]\n return out, self._params", "def build_inference_graph(self):\n self.build_train_graph()", "def CreateGraph(Points, Edges):\n G = nx.Graph()\n newG, Nodes = CreateVertices(Points, G)\n Graph = CreateEdges(Nodes, Edges, newG)\n return Graph", "def build_graph(self):\n self.import_tree(ZOO_PATH, self.import_zoo, self.verify_zoos)\n self.import_tree(WILD_PATH, self.import_wild, self.verify_wilds)\n self.import_tree(PANDA_PATH, self.import_redpanda, self.verify_pandas)\n self.import_tree(MEDIA_PATH, self.import_media, self.verify_media)", "def build_2_node_graph(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_edge(1, 2)\n\n return graph", "def generate_dag_graph(self):\n # generate ranom graph\n G = nx.DiGraph()\n G.add_nodes_from(range(self.nodes))\n return self.fix_graph(G)", "def nx_graph_from_dot_file(dot_file_path):\n # this does not understand dot statements like X->Y,Z;\n # nx_graph = nx.nx_pydot.read_dot(dot_file_path)\n\n nodes, edges = DotTool.read_dot_file(dot_file_path)\n g = nx.DiGraph()\n g.add_edges_from(edges)\n\n return g", "def get(self):\n self.network = gt.load_graph(self.dotfile)\n\n if self.strongcomponent:\n self.network=gt.extract_largest_component(\n self.network, directed=True, prune=True)\n\n if self.removeselfloops:\n gt.remove_self_loops(self.network)\n\n self.nm = self.network.new_vertex_property(\"string\")\n nm2 = self.network.new_vertex_property(\"string\")\n self.hl = self.network.new_vertex_property(\"bool\")\n self.network.vertex_properties[\"text\"] = self.nm\n self.network.vertex_properties[\"text\"] = nm2\n names=[]\n for v in self.network.vertices():\n if v.out_degree() > -1:\n self.nm[v]=self.short_name(\n 
self.network.vp.vertex_name[v],self.preflen)\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.hl[v]=False\n else:\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.nm[v]=''\n self.hl[v]=False\n names=names+[nm2[v]]\n\n NAMES=pd.Series(list(set(names)),\n name='varclass').reset_index().set_index('varclass')\n self.varclass = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"varclass\"] = self.varclass\n for v in self.network.vertices():\n self.varclass[v]=NAMES.loc[nm2[v]].values[0]\n\n self.od = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.od\n for v in self.network.vertices():\n self.od[v]=self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=self.exponent)+5\n self.ods = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.ods\n for v in self.network.vertices():\n self.ods[v]=1*self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=1)+2\n\n self.ew = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight\"] = self.ew\n for e in self.network.edges():\n self.ew[e]=float(self.network.ep.weight[e])**1\n\n self.ew_pen = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight_pen\"] = self.ew_pen\n for e in self.network.edges():\n self.ew_pen[e]=4/(1 + np.exp(-.05-np.fabs(float(self.network.ep.weight[e]))))\n\n self.e_marker = self.network.new_edge_property(\"string\")\n self.network.edge_properties[\"e_marker\"] = self.e_marker\n for e in self.network.edges():\n if float(self.network.ep.weight[e]) < 0:\n self.e_marker[e]='bar'\n else:\n self.e_marker[e]='arrow'\n\n self.deg = self.network.degree_property_map(\"out\")\n\n self.ecol = self.network.new_edge_property(\"vector<double>\")\n self.network.edge_properties[\"ecol\"] = self.ecol\n for e in self.network.edges():\n col=cm.ScalarMappable(mpl.colors.Normalize(vmin=-self.edgecollim,\n vmax=self.edgecollim),\n cmap=self.edgecolmap).to_rgba(float(self.ew[e]))\n col=list(col)\n col[3]=self.edgealpha\n self.ecol[e]=tuple(col)\n\n self.pos = gt.graphviz_draw(self.network,\n overlap=False,\n vsize=20,\n sep=self.nodesep,\n output=None)\n\n self.control = self.network.new_edge_property(\"vector<double>\")\n for e in self.network.edges():\n d = np.sqrt(np.sum((self.pos[e.source()].a\n - self.pos[e.target()].a) ** 2))\n d=d/2\n self.control[e] = [0.0,0.0,0, .2*d, 0.5, d,1,0]\n\n if self.outfile is not None:\n gt.graph_draw(self.network,nodesfirst=False,\n pos=self.pos,\n vertex_halo=self.hl,\n vertex_halo_color=[.2,.2,.2,.1],\n edge_pen_width=self.ew_pen,\n edge_end_marker=self.e_marker,\n vorder=self.deg,\n edge_marker_size=10,\n vertex_color=self.varclass,#[.5,.5,.5,.3],\n edge_color=self.ecol,#[.5,.5,.5,.5],\n vertex_pen_width=1.5,\n vertex_size=self.od,\n vertex_text=self.nm,\n vcmap=(self.cmap,self.alpha),\n edge_control_points=self.control,\n vertex_fill_color=self.varclass,#deg,\n vertex_font_size=self.ods,\n vertex_text_color=[.1,.1,.1,.8],\n #vertex_text_position=0,\n output=self.outfile)", "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n 
self.graph = self._CreateAdjacencyListGraph()", "def build_graph_from_triplets(num_nodes, num_rels, triplets):\n g = dgl.DGLGraph()\n g.add_nodes(num_nodes)\n src, rel, dst = triplets\n src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))\n rel = np.concatenate((rel, rel + num_rels))\n edges = sorted(zip(dst, src, rel))\n dst, src, rel = np.array(edges).transpose()\n g.add_edges(src, dst)\n norm = comp_deg_norm(g)\n return g, rel, norm", "def nx2gt(nxG):\n # Phase 0: Create a directed or undirected graph-tool Graph\n gtG = Graph(directed=nxG.is_directed())\n\n # Add the Graph properties as \"internal properties\"\n for key, value in list(nxG.graph.items()):\n # Convert the value and key into a type for graph-tool\n tname, value, key = get_prop_type(value, key)\n\n prop = gtG.new_graph_property(tname) # Create the PropertyMap\n \n gtG.graph_properties[key] = prop # Set the PropertyMap\n gtG.graph_properties[key] = value # Set the actual value\n\n # Phase 1: Add the vertex and edge property maps\n # Go through all nodes and edges and add seen properties\n # Add the node properties first\n nprops = set() # cache keys to only add properties once\n for node, data in nxG.nodes(data=True):\n\n # Go through all the properties if not seen and add them.\n for key, val in list(data.items()): \n if key in nprops: continue # Skip properties already added\n\n # Convert the value and key into a type for graph-tool\n tname, _, key = get_prop_type(val, key)\n\n prop = gtG.new_vertex_property(tname) # Create the PropertyMap\n gtG.vertex_properties[key] = prop # Set the PropertyMap\n\n # Add the key to the already seen properties\n nprops.add(key)\n\n # Also add the node id: in NetworkX a node can be any hashable type, but\n # in graph-tool node are defined as indices. 
So we capture any strings\n # in a special PropertyMap called 'id' -- modify as needed!\n gtG.vertex_properties['id'] = gtG.new_vertex_property('string')\n\n # Add the edge properties second\n eprops = set() # cache keys to only add properties once\n for src, dst, data in nxG.edges(data=True):\n\n # Go through all the edge properties if not seen and add them.\n for key, val in list(data.items()): \n if key in eprops: continue # Skip properties already added\n\n # Convert the value and key into a type for graph-tool\n tname, _, key = get_prop_type(val, key)\n \n prop = gtG.new_edge_property(tname) # Create the PropertyMap\n gtG.edge_properties[key] = prop # Set the PropertyMap\n\n # Add the key to the already seen properties\n eprops.add(key)\n\n # Phase 2: Actually add all the nodes and vertices with their properties\n # Add the nodes\n vertices = {} # vertex mapping for tracking edges later\n for node, data in nxG.nodes(data=True):\n\n # Create the vertex and annotate for our edges later\n v = gtG.add_vertex()\n vertices[node] = v\n\n # Set the vertex properties, not forgetting the id property\n data['id'] = str(node)\n for key, value in list(data.items()):\n gtG.vp[key][v] = value # vp is short for vertex_properties\n\n # Add the edges\n for src, dst, data in nxG.edges(data=True):\n\n # Look up the vertex structs from our vertices mapping and add edge.\n e = gtG.add_edge(vertices[src], vertices[dst])\n\n # Add the edge properties\n for key, value in list(data.items()):\n gtG.ep[key][e] = value # ep is short for edge_properties\n\n # Done, finally!\n return gtG", "def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def build_graph(num_accounts):\n\n all_requirements = []\n\n # Syslog account requirements\n syslog_account = 'syslog'\n all_requirements.append(CreateAccount('Create [Syslog]', syslog_account))\n all_requirements.append(AdminAccess('Admin Access [Syslog]', syslog_account))\n all_requirements.append(S3Bucket('S3 Bucket [Syslog]', syslog_account))\n all_requirements.append(SQSQueue('SQS Queue [Syslog]', syslog_account))\n\n # PDU account(s) requirements\n for n in range(1, 
num_accounts+1):\n pdu_account = \"PDU{}\".format(n)\n all_requirements.append(CreateAccount(\"Create [{}]\".format(pdu_account), pdu_account))\n all_requirements.append(AdminAccess(\"Admin Access [{}]\".format(pdu_account), pdu_account))\n all_requirements.append(CloudTrailSNSTopic(\"CloudTrail SNS [{}]\".format(pdu_account), pdu_account))\n all_requirements.append(CloudTrailTrail(\"CloudTrail Trail [{}]\".format(pdu_account), pdu_account))\n\n # Build graph based on each requirement's dependencies\n g = nx.DiGraph()\n for req in all_requirements:\n print(\"Adding node '{}'\".format(req))\n g.add_node(req)\n dependencies = req.get_dependencies(all_requirements)\n for dep in dependencies:\n print(\"Adding edge from '{}' to '{}'\".format(dep, req))\n g.add_edge(dep, req)\n return g", "def populate_graph(self):", "def create_graph(users, friend_counts):\n ###TODO-- Completed\n G = nx.Graph()\n\n #For Filtering the Nodes\n #print(friend_counts)\n friend_nodes = [friend for friend in friend_counts if friend_counts[friend] > 1]\n candidate_nodes = [user['screen_name'] for user in users]\n\n #print(\"Nodes: \",len(friend_nodes), len(candidate_nodes))\n #Adding Nodes to graph\n G.add_nodes_from(friend_nodes + candidate_nodes)\n\n #Connecting the Nodes with Edges\n for candidate in users:\n for friend in friend_nodes:\n if friend in candidate['friends']:\n G.add_edge(candidate['screen_name'], friend)\n\n return G", "def __graph__(self):\n\n graph = rdflib.Graph()\n for prefix, name in self.rml.namespaces():\n graph.namespace_manager.bind(prefix, name)\n return graph", "def as_graph(self, graph=None):\n # at this level it works but what if we have nested structures?\n # What is a graph if not a set of links? Why do not we put all into a graph?\n if not graph:\n graph = nx.Graph()\n\n for link in self.sequence:\n logging.info(link)\n (l, r) = link.value\n (ln, rn) = link.name\n logging.info (\"Node: %s %s \" % (l.name, str(l.shannon)))\n graph.add_node(l.name, shannon=l.shannon, IC=l.IC)\n logging.info (\"Node: %s %s \" % (r.name, str(r.shannon)))\n graph.add_node(r.name, shannon=r.shannon, IC=r.IC)\n logging.info (\"Edge: %s %s %s \" % (l.name, r.name, str(link.PMI)))\n graph.add_edge(l.name, r.name, pmi=link.PMI)\n\n return graph", "def build_graph(self, graph, inst_name, port_nets):\n self.add_graph_edges(graph, port_nets)", "def construct_graph(social_edges, spatial_edges, output_path=None):\n G = nx.DiGraph()\n with open(social_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n G.add_edge(USER_NODE_PREFIX + edge[0], USER_NODE_PREFIX + edge[-2], weight=float(edge[-1]))\n\n business_nodes = set([])\n with open(spatial_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n lat = float(edge[2])\n lng = float(edge[3])\n if edge[-2] not in business_nodes:\n G.add_node(BUSINESS_NODE_PREFIX + edge[-2], spatial={'lat': lat, 'lng': lng})\n business_nodes.add(edge[-2])\n\n with open(spatial_edges, 'r') as f:\n for l in f.read().splitlines():\n edge = l.split(\"\\t\")\n G.add_edge(USER_NODE_PREFIX + edge[0], BUSINESS_NODE_PREFIX + edge[-2], weight=float(edge[-1]))\n\n if output_path:\n pickle.dump(G, open(output_path, 'w'))\n return G", "def graph_from_dict(self,\n data,\n source_label,\n target_label,\n data_source='local',\n source_attributes=[],\n target_attributes=[]):\n G = nx.Graph()\n for row in data:\n if source_label not in row or target_label not in row:\n continue\n source = row[source_label]\n target = row[target_label]\n\n if source == \"\" 
or target == \"\":\n continue\n\n G.add_edge(source, target)\n G.edges[source, target][data_source] = row\n G.nodes[source][data_source] = {}\n G.nodes[target][data_source] = {}\n for attribute in source_attributes:\n G.nodes[source][data_source][attr] = data[attr]\n for attribute in target_attributes:\n G.nodes[target][data_source][attr] = data[attr]\n\n return G", "def __init__(self, variables, constraints):\n self.variables = variables\n self.constraints = constraints\n for c in constraints:\n c.var1.peers.append(c.var2)\n c.var2.peers.append(c.var1)", "def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))", "def prepare_graph(label, nodes, edges, graphID):\n features = {'label': label}\n\n G = nx.DiGraph()\n nodes[\"id\"] = nodes[\"id\"].apply(lambda x : str(x))\n features['num_nodes'] = nodes.shape[0]\n op_node = None\n times = []\n friends = []\n followers = []\n for index, row in nodes.iterrows():\n G.add_node(row['id'], time=row['time'], friends=row['friends'], followers = row['followers'])\n times.append(row['time'])\n friends.append(2**row['friends'])\n followers.append(2**row['followers'])\n if row['time'] == 0:\n features['poster_friend_cnt'] = 2**row['friends']\n features['poster_follower_cnt'] = 2**row['followers']\n tweeter_id = row['id']\n op_node = row['id']\n features['avg_time'] = np.mean(times)\n features['avg_friends'] = np.mean(friends)\n features['avg_followers'] = np.mean(followers)\n features['max_followers'] = max(followers)\n features['max_friends'] = max(friends)\n features['friends_25th_percentile'] = np.percentile(friends, 25)\n features['friends_75th_percentile'] = np.percentile(friends, 75)\n features['followers_25th_percentile'] = np.percentile(followers, 25)\n features['followers_75th_percentile'] = np.percentile(followers, 75)\n node_list = []\n edge_count = 0\n for pair in edges:\n node1, node2 = pair.split()[0], pair.split()[1]\n node_list.append(node1)\n node_list.append(node2)\n G.add_edge(node1, node2)\n edge_count += 1\n features['num_edges'] = edge_count\n sum_users_without_followers = sum([1 for (node, val) in G.in_degree() if val==0])\n features['ratio_users_w/out_followers'] = sum_users_without_followers / len(G.nodes)\n features['num_connected_components'] = nx.number_strongly_connected_components(G)\n features['number_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id)\n features['percentage_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id) / features['poster_follower_cnt']\n features['avg_clustering'] = nx.average_clustering(G)\n features['op_clustering'] = nx.clustering(G,op_node)\n features['transitivity'] = nx.transitivity(G)\n node_list = list(set(node_list))\n features['nodeID_list'] = np.array(node_list)\n features['graph_id'] = graphID\n return features, G", "def build_3_node_line_graph(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_node()\n\n graph.new_edge(1, 2)\n graph.new_edge(2, 
3)\n\n return graph", "def generate_regular_graph(variable_names, dist_func, num_neigh=10, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n num_neigh = min(num_neigh, num_vars-1)\n graphs = nx.random_graphs.random_regular_graph(num_neigh, num_vars)\n edges = np.array(graphs.edges())\n edges.sort(axis=-1)\n\n return graph_from_edges(variable_names, dist_func, edges)", "def make_join_graph(parsed_join_clauses):\n g = nx.Graph()\n for t1, c1, t2, c2 in parsed_join_clauses:\n g.add_node(t1)\n g.add_node(t2)\n if not g.has_edge(t1, t2):\n g.add_edge(t1, t2, join_columns={t1: [c1], t2: [c2]})\n else:\n edge = g[t1][t2]\n edge[\"join_columns\"][t1].append(c1)\n edge[\"join_columns\"][t2].append(c2)\n return g", "def nx_to_neo4j(nx_graph=None):\n\n if not nx_graph:\n nx_graph = json_to_nx(\"LegalKnowledgeGraph.json\")\n authenticate(ENV[\"DB_URL\"].replace(\"http://\", \"\"), ENV[\"DB_USERNAME\"],ENV[\"DB_PASSWORD\"]) # Accessing the NEO4J server\n neo4j_graph = Graph(ENV[\"DB_URL\"]+\"/db/data/\")\n string_to_instance_mapping = dict()\n\n list_node = list(nx_graph.nodes(data=True))\n for i in range(len(list_node)):\n node_instance = Node(list_node[i][1][\"type\"], id=list_node[i][0])\n string_to_instance_mapping[list_node[i][0]] = node_instance\n\n list_edges=list(nx_graph.edges())\n for i in range(nx_graph.number_of_edges()):\n source_node_instance = string_to_instance_mapping[list_edges[i][0]]\n target_node_instance = string_to_instance_mapping[list_edges[i][1]]\n b = Relationship(source_node_instance, \"MAPS TO\", target_node_instance)\n neo4j_graph.create(b)", "def reconstruct_graph(inputs, outputs, tag=None):\r\n if tag is None:\r\n tag = ''\r\n nw_inputs = [safe_new(x, tag) for x in inputs]\r\n givens = OrderedDict()\r\n for nw_x, x in izip(nw_inputs, inputs):\r\n givens[x] = nw_x\r\n allinputs = theano.gof.graph.inputs(outputs)\r\n for inp in allinputs:\r\n if isinstance(inp, theano.Constant):\r\n givens[inp] = inp.clone()\r\n\r\n nw_outputs = clone(outputs, replace=givens)\r\n return (nw_inputs, nw_outputs)", "def graph(pkgs: Dict[str, BinaryPackage]) -> nx.DiGraph:\n # To get build dependencies from a source package\n build_deps = {}\n # To get the source package for a binary package\n src_pkgs = {}\n # To get the directory for a source package\n directories = {}\n\n # Build maps mentioned above\n for _, pkg in pkgs.items():\n build_deps[pkg.source] = pkg.build_deps\n src_pkgs[pkg.name] = pkg.source\n directories[pkg.source] = pkg.dir\n\n # Convert binary build dependencies to local directories\n for src, d in directories.items():\n src_deps = []\n for dep in build_deps[src]:\n if dep in src_pkgs:\n src_deps.append(directories[src_pkgs[dep]])\n build_deps[src] = src_deps\n\n # Create graph from build dependencies\n dep_graph = nx.DiGraph()\n for src, d in directories.items():\n dep_graph.add_node(d)\n dep_graph.add_nodes_from(build_deps[src])\n for dep in build_deps[src]:\n dep_graph.add_edge(d, dep)\n\n return dep_graph", "def __init__(self, variables, constraints):\n self.__variables = variables\n self.__constraints = constraints\n\n self.__make_node_consistent()", "def build_graph(self):\n if self.model == 'dense':\n # ForecastNet with two densely connected hidden layers in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv':\n # ForecastNet with a convlutional neural network in a cell and Mixture 
Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_conv_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'dense2':\n # ForecastNet with two densely connected hidden layers in a cell and linear outputs\n self.outputs, self.cost = forecastnet_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv2':\n # ForecastNet with a convolutional neural network in a cell and linear outputs\n self.outputs, self.cost = forecastnet_conv_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)", "def _CreateAdjacencyListGraph(self):\n graph = dict()\n for nodes in self.nodes:\n graph[nodes[0]] = set()\n for edges in self.edges:\n graph[edges[0]].add(edges[1])\n return graph", "def _create_reference_connectivity_graph(self):\n #take the self._residue_graph and create a replicate (without the misc attributes) with the atoms_with_positions\n _reference_connectivity_graph = nx.Graph()\n atoms_with_positions = set(self._atoms_with_positions)\n\n #iterate over all the bonds\n for bond in self._residue_graph.edges():\n if set(bond).issubset(atoms_with_positions):\n #if both of the atoms in the bond are in atoms_with_positions, we can add the atoms/bonds to the reference\n _reference_connectivity_graph.add_edge(*bond)\n\n return _reference_connectivity_graph", "def build_graph(self, objects_: Union[Dict[str, List[Union[str, dict, int]]], NamedTuple],\n root_name: str, *branch_names: str) -> list:\n\n all_nodes = []\n for key, value in objects_.items():\n for index, items in enumerate(value):\n if isinstance(items, dict):\n vertices = self.initialize_vertices(objects_=items,\n root_name=root_name,\n independent=False,\n group=index)\n all_nodes.extend(vertices)\n continue\n\n root = set()\n\n for vertex in all_nodes:\n if vertex[root_name]:\n root.add(vertex[root_name])\n\n for branch in branch_names:\n if not isinstance(branch, str):\n raise TypeError(f\"Expected a string, got {type(branch)}\")\n\n try:\n if vertex[branch]:\n branches = self.locate_graph_link(vertex_list=all_nodes,\n branch_name=branch)\n except KeyError:\n continue\n\n share = self.initialize_connection(branch_iterable=branches, root_iterable=root)\n\n return self.find_nodes_subtype(root_name)", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def build_graph(self):\n return nn.Sequential(\n nn.Linear(self.input_dim, self.hidden_dim),\n self.hidden_activation,\n nn.Linear(self.hidden_dim, self.n_classes_))", "def __init__(self, network: Network):\n self.graph = network.graph", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def display_graph(variables, relations):\n graph = as_networkx_graph(variables, relations)\n\n # Do not crash if matplotlib is not installed\n try:\n import matplotlib.pyplot as plt\n\n nx.draw_networkx(graph, with_labels=True)\n # nx.draw_random(graph)\n # nx.draw_circular(graph)\n # nx.draw_spectral(graph)\n plt.show()\n except 
ImportError:\n print(\"ERROR: cannot display graph, matplotlib is not installed\")", "def __build_nodes(self):\n self.components = {}\n\n for node in self.get_nodes():\n # Create the node\n assert node not in self.components, \"Node %s already exists\" % node.name\n self.components[node] = Node(name=node,\n node=self.graph.nodes[node],\n temperature_driven=self.temperature_driven,\n repr_days=self.repr_days)\n # Add the new components\n self.components.update(self.components[node].get_components())", "def create_computational_graph(node):\n graph = CompGraph()\n graph.build_graph(node)\n return graph", "def build_single_node_graph(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n graph.new_node()\n\n return graph", "def get_dependency_graph(self, zero_index=False):\n directed_graph = nx.DiGraph()\n construction_objects = self.steps\n if zero_index:\n labels = range(len(construction_objects))\n else:\n labels = range(1, len(construction_objects) + 1)\n object_labels = dict(map(lambda x, y: (x, y), construction_objects, labels))\n\n directed_graph.add_nodes_from(object_labels.values())\n for shape in construction_objects:\n for dependency in shape.dependencies:\n directed_graph.add_edge(object_labels[dependency], object_labels[shape])\n\n return object_labels, directed_graph", "def generate_networkx_graphs(raw_graphs):\n\n source_graphs = [source_from_raw(raw) for raw in raw_graphs]\n target_graphs = [target_from_raw(raw) for raw in raw_graphs]\n\n return source_graphs, target_graphs", "def build_graph(self):\n self._reset_iterator_memory()\n self._construct_graph_handler()\n assert self.graph_handler\n for rxn_id in self.graph_handler.get_valid_reaction_ids():\n rxn = db.Reaction(rxn_id, self._reactions)\n self.graph_handler.add_rxn(rxn)", "def make_dag(self, expand=set()):\n G = nx.DiGraph()\n\n ## Inputs-to-Functions\n for f in self.functions:\n # Expand composed models\n if isinstance(f, FunctionModel) and (f.name in expand):\n G_ref = f.model.make_dag(expand=expand - {f})\n G_sub = nx.DiGraph()\n # Add nodes\n G_sub.add_node(f.name + \".var\")\n G_sub.add_node(f.name + \".out\")\n for g in f.model.functions:\n G_sub.add_node(f.name + \".\" + g.name)\n # Add node metadata\n nx.set_node_attributes(G_sub, f.name, \"parent\")\n\n # Add edges\n for u, v, d in G_ref.edges(data=True):\n # Add renamed edge\n if u == \"(var)\":\n G_sub.add_edge(f.name + \".var\", f.name + \".\" + v, **d)\n elif v == \"(out)\":\n G_sub.add_edge(f.name + \".\" + u, f.name + \".out\", **d)\n else:\n G_sub.add_edge(f.name + \".\" + u, f.name + \".\" + v, **d)\n\n # Compose the graphs\n G = nx.compose(G, G_sub)\n\n i_var = set(self.var).intersection(set(f.var))\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(\"(var)\", f.name + \".var\", label=s_var)\n else:\n G.add_edge(\"(var)\", f.name, label=s_var)\n\n ## Function-to-Function\n for i0 in range(len(self.functions)):\n for i1 in range(i0 + 1, len(self.functions)):\n f0 = self.functions[i0]\n f1 = self.functions[i1]\n i_var = set(f0.out).intersection(set(f1.var))\n\n ## If connected\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n ## Handle composed models\n if isinstance(f0, FunctionModel) and (f0.name in expand):\n name0 = f0.name + \".out\"\n else:\n name0 = f0.name\n if isinstance(f1, FunctionModel) and (f1.name in expand):\n name1 = f1.name + \".out\"\n else:\n name1 = f1.name\n\n G.add_edge(name0, name1, label=s_var)\n\n ## 
Functions-to-Outputs\n for f in self.functions:\n i_out = set(self.out).intersection(set(f.out))\n\n if len(i_out) > 0:\n s_out = \"{}\".format(i_out)\n ## Target composed model's out\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(f.name + \".out\", \"(out)\", label=s_out)\n ## An ordinary function\n else:\n G.add_edge(f.name, \"(out)\", label=s_out)\n\n # Add node metadata\n nx.set_node_attributes(G, {f.name: {\"parent\": self.name}})\n\n # Final metadata\n nx.set_node_attributes(G, {\"(var)\": {\"parent\": self.name}})\n nx.set_node_attributes(G, {\"(out)\": {\"parent\": self.name}})\n\n return G", "def create_directed_graph(coupling_coefs, weighted=False):\n weight_matrix = coupling_coefs_to_weight_matrix(coupling_coefs)\n\n if not weighted:\n weight_matrix = (weight_matrix != 0).astype('int')\n\n G = nx.convert_matrix.from_numpy_matrix(\n weight_matrix,\n create_using=nx.DiGraph()\n )\n return G", "def to_networkx(self, max_vertices: int = 5000) -> nx.Graph:\n graph_nx = nx.Graph()\n for v in self._vertices.values():\n graph_nx.add_node(v.item, kind=v.kind)\n\n for u in v.neighbours:\n if graph_nx.number_of_nodes() < max_vertices:\n graph_nx.add_node(u.item, kind=u.kind)\n\n if u.item in graph_nx.nodes:\n graph_nx.add_edge(v.item, u.item)\n\n if graph_nx.number_of_nodes() >= max_vertices:\n break\n\n return graph_nx", "def construct_network_from_neighbours_list(related_characters: list):\n graph = nx.Graph()\n for edge in related_characters:\n sentiment = edge[1]\n color = ''\n if sentiment == 'Positive':\n color = 'g'\n elif sentiment == 'Negative':\n color = 'r'\n elif sentiment == 'Neutral':\n color = 'k'\n # graph.add_node(edge[0][0], popularity=\n graph.add_edge(edge[0][0], edge[0][1], color=color, weight=edge[2])\n\n return graph", "def get_graph(self, with_fix=False):\n nodes = self.get_tasks()\n if with_fix:\n for n in nodes:\n n.fix_arguments()\n deps = self.get_dependencies()\n graph = nx.DiGraph()\n graph.add_nodes_from(nodes)\n graph.add_edges_from( [d.edge() for d in deps] )\n return graph", "def tree_graph(self, parents: dict) -> nx.DiGraph:\n tree_graph = nx.DiGraph(parents)\n for parent, child in tree_graph.edges():\n tree_graph.edges[parent, child][\"weight\"] = self.a[child]\n return tree_graph", "def gen_network(self):\n di = nx.DiGraph()\n di.add_edges_from(self.network_edges())\n di.add_nodes_from(self.network_nodes())\n self.network = di\n self.highlight_cycles()\n return self", "def build_auxiliary_node_connectivity(G):\n directed = G.is_directed()\n\n mapping = {}\n H = nx.DiGraph()\n\n for i, node in enumerate(G):\n mapping[node] = i\n H.add_node('%dA' % i, id=node)\n H.add_node('%dB' % i, id=node)\n H.add_edge('%dA' % i, '%dB' % i, capacity=1)\n\n edges = []\n for (source, target) in G.edges():\n edges.append(('%sB' % mapping[source], '%sA' % mapping[target]))\n if not directed:\n edges.append(('%sB' % mapping[target], '%sA' % mapping[source]))\n H.add_edges_from(edges, capacity=1)\n\n # Store mapping as graph attribute\n H.graph['mapping'] = mapping\n return H", "def get_network(self):\n\n # Find which nodes are input and which are output. 
We may want to store\n # this info somewhere else (like in the genome)\n\n inputs = []\n outputs = []\n bias = []\n edges = []\n node_num = dict() #Map from node_id to zero index node number\n\n for i, node in enumerate(self.node_genes):\n # Create mapping\n node_num[node.node_id] = i\n\n # Store input and output node_numbers\n if node.node_type is INPUT:\n inputs.append(i)\n elif node.node_type is OUTPUT:\n outputs.append(i)\n elif node.node_type is BIAS:\n bias.append(i)\n\n # Create edge list.\n for link in self.link_genes:\n if link.enabled:\n edges.append((node_num[link.to_node.node_id],\n node_num[link.from_node.node_id], link.weight))\n\n\n # Build an adjacency matrix for the network\n n = len(node_num)\n adj_matrix = np.zeros((n, n))\n try:\n for e in edges:\n adj_matrix[e[:2]] = e[2]\n except:\n global GENOME\n GENOME = self\n print([node.node_id for node in self.node_genes])\n print()\n print('len(node_genes)', len(self.node_genes))\n print('edge', e)\n print('adj.shape', adj_matrix.shape)\n sys.exit()\n\n return Network(adj_matrix, inputs, outputs, bias)", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. 
Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict" ]
[ "0.81066954", "0.6750233", "0.6639164", "0.66322136", "0.6630946", "0.6617218", "0.6586329", "0.65858966", "0.65559703", "0.6498455", "0.64820826", "0.6480336", "0.6440278", "0.64248806", "0.6404701", "0.6381441", "0.63398075", "0.6318121", "0.62991834", "0.6286606", "0.62709624", "0.6231524", "0.6231379", "0.61804515", "0.61755943", "0.61700547", "0.6168517", "0.6159621", "0.61529714", "0.6134124", "0.61215794", "0.6109283", "0.6105704", "0.61043173", "0.60908306", "0.6090599", "0.6080594", "0.6075774", "0.6073858", "0.6072598", "0.60445666", "0.6037513", "0.6032061", "0.6026378", "0.602024", "0.6013448", "0.6010675", "0.60104275", "0.60095716", "0.5993891", "0.5991007", "0.5952876", "0.59525317", "0.5934177", "0.5903695", "0.5901864", "0.589299", "0.5877245", "0.58457065", "0.5845489", "0.5839418", "0.5838934", "0.58375037", "0.58321756", "0.58260036", "0.5817166", "0.5799169", "0.5797646", "0.5795518", "0.57890284", "0.57849276", "0.5780178", "0.5772816", "0.5769898", "0.5769465", "0.5769425", "0.576524", "0.5764016", "0.57579035", "0.575562", "0.5752927", "0.57497287", "0.573639", "0.5731224", "0.5722314", "0.57196826", "0.5717507", "0.57152987", "0.5714696", "0.5713538", "0.5711297", "0.5705234", "0.5696113", "0.5695786", "0.56944466", "0.5692901", "0.56842434", "0.5684015", "0.5669575", "0.5669575" ]
0.73165965
1
Display the variables and relation as a graph, using networkx and matplotlib.
def display_graph(variables, relations):
    graph = as_networkx_graph(variables, relations)

    # Do not crash if matplotlib is not installed
    try:
        import matplotlib.pyplot as plt

        nx.draw_networkx(graph, with_labels=True)
        # nx.draw_random(graph)
        # nx.draw_circular(graph)
        # nx.draw_spectral(graph)
        plt.show()
    except ImportError:
        print("ERROR: cannot display graph, matplotlib is not installed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()", "def plot_graph(self) -> None:", "def display_bipartite_graph(variables, relations):\n graph = as_networkx_bipartite_graph(variables, relations)\n\n # Do not crash if matplotlib is not installed\n try:\n import matplotlib.pyplot as plt\n\n pos = nx.drawing.spring_layout(graph)\n variables = set(n for n, d in graph.nodes(data=True) if d[\"bipartite\"] == 0)\n factors = set(graph) - variables\n nx.draw_networkx_nodes(\n graph,\n pos=pos,\n with_labels=True,\n nodelist=variables,\n node_shape=\"o\",\n node_color=\"b\",\n label=\"variables\",\n alpha=0.5,\n )\n nx.draw_networkx_nodes(\n graph,\n pos=pos,\n with_labels=True,\n nodelist=factors,\n node_shape=\"s\",\n node_color=\"r\",\n label=\"factors\",\n alpha=0.5,\n )\n nx.draw_networkx_labels(graph, pos=pos)\n nx.draw_networkx_edges(graph, pos=pos)\n # nx.draw_random(graph)\n # nx.draw_circular(graph)\n # nx.draw_spectral(graph)\n plt.show()\n except ImportError:\n print(\"ERROR: cannot display graph, matplotlib is not installed\")", "def show_graph(g):\r\n net.draw(g,with_labels= True,font_size=16)\r\n plt.show()", "def visualise(self) -> None:\n nx_graph = nx.DiGraph()\n\n for v in self._vertices:\n if not v.predicate:\n name = v.name.split(\"/\")[-1]\n nx_graph.add_node(name, name=name, pred=v.predicate)\n\n for v in self._vertices:\n if not v.predicate:\n v_name = v.name.split(\"/\")[-1]\n # Neighbors are predicates\n for pred in self.get_neighbors(v):\n pred_name = pred.name.split(\"/\")[-1]\n for obj in self.get_neighbors(pred):\n obj_name = obj.name.split(\"/\")[-1]\n nx_graph.add_edge(v_name, obj_name, name=pred_name)\n\n plt.figure(figsize=(10, 10))\n _pos = nx.circular_layout(nx_graph)\n nx.draw_networkx_nodes(nx_graph, pos=_pos)\n nx.draw_networkx_edges(nx_graph, pos=_pos)\n nx.draw_networkx_labels(nx_graph, pos=_pos)\n names = nx.get_edge_attributes(nx_graph, \"name\")\n nx.draw_networkx_edge_labels(nx_graph, pos=_pos, edge_labels=names)", "def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. 
:)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def network_graph(net_dict=None):\n if net_dict == None:\n net_dict = {}\n else:\n G = nx.from_dict_of_lists(net_dict)\n plt.figure(num=None, figsize=(30, 30), dpi=80, facecolor='w', edgecolor='c')\n nx.draw_networkx(G, with_labels=True, alpha=0.5, edge_color='c', cmap=plt.cm.GnBu)\n plt.savefig(\"metabolism_5years.png\", bbox_inches='tight')", "def showGraph(self, file_name = \"\"):\n \n # prepare edges and weights for visualization\n edges = self.graph.edges()\n weights = [self.graph_data[u]['pheromones'][v] for u,v in edges]\n weights_sum = sum(weights)\n weights = [ (w/weights_sum)*50 for w in weights]\n \n # prepare different shades of red to be used to optionally differentiate\n # between edges with different costs\n # to show more informatiion on the same graph\n colors = []\n max_cost = max([self.graph_data[u]['costs'][v] for u,v in edges])\n for u,v in edges:\n if self.graph_data[u]['costs'][v] <= max_cost/32:\n colors.append('#ff7f7f')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/16:\n colors.append('#ff6666')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/8:\n colors.append('#ff4c4c')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/4:\n colors.append('#ff3232')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/2:\n colors.append('#ff1919')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost:\n colors.append('#ff0000')\n continue\n \n # print the graph \n pos=nx.circular_layout(self.graph)\n nx.draw( 
self.graph,pos=pos,node_size=200,node_color='#A8A8A8', with_labels=True,edges=edges, edge_color=colors,edge_cmap=plt.cm.Blues, width=weights)\n if file_name != \"\":\n path = \"img/\"+file_name\n plt.savefig(path, format=\"PNG\")\n plt.show()", "def visualize_graph(edges_lst):\n G = nx.Graph()\n for edge in edges_lst:\n start = edge[0]\n end = edge[1]\n weight = edge[2]\n G.add_edge(start, end, weight=weight)\n pos = nx.planar_layout(G)\n nx.draw_networkx(G, pos)\n labels = nx.get_edge_attributes(G, 'weight')\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.show()", "def showGraph(G, mate, label=\"\"):\r\n \r\n # Set the positions for all nodes and the figure size\r\n plt.close('all')\r\n plt.figure( figsize=(10, 10) )\r\n pos = nx.graphviz_layout(G, prog='sfdp', args='')\r\n \r\n # Draw the graph with node labels and a title\r\n plt.title(label)\r\n nx.draw(G, pos, node_size=400, with_labels=True)\r\n \r\n # Draw the matched edges\r\n nx.draw_networkx_edges(G, pos, edgelist=mate.items(),\r\n width=5, alpha=0.4, edge_color='b')\r\n \r\n plt.axis('off')\r\n plt.show()", "def graphviz_prettify(self, network):\n graph_settings = {\n 'rankdir': 'LR',\n 'dpi': 60,\n }\n network.graph.update(graph_settings)\n\n for n in network.nodes():\n if isinstance(n, Variable):\n network.nodes[n]['label'] = n.name\n elif isinstance(n, Equation):\n network.nodes[n]['shape'] = 'diamond'", "def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()", "def disp_graph(graph, output_filename):\n dot = Graph(name=\"Graph\", format=\"png\") # instantiate a graph object\n for node in graph.keys(): # add nodes to the graph\n dot.node(str(node))\n for node in graph.keys(): # for every node in the input graph\n # for every other node in the input graph that the first node is connected to\n for other_node in graph[node].keys():\n dot.edge(str(node), str(other_node)) # create the edge\n dot.render(output_filename, view=True) # visualize the graph and save it", "def plot_graph(G):\r\n pos = nx.random_layout(G)\r\n nx.draw(G, pos)\r\n edge_labels = dict([((u, v, ), d['label']) for u, v, d in\r\n G.edges(data=True)])\r\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\r\n nx.draw_networkx_labels(G, pos, labels={i:i for i in G.nodes()},\r\n font_size=16)\r\n plt.show()", "def plotGraph(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n fig.canvas.set_window_title(\"Neural Network\")\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n #plt.savefig(\"autoencoder.svg\", transparent = True)\n plt.show()", "def plot_graph(self, input_graph, NX_GRAPHS):\n self.dgl_graph = input_graph\n self.NX_GRAPHS = NX_GRAPHS\n \n self.get_nodes()\n color_monomer = self.get_colors()\n \n print(dict(zip(range(len(self.nodes_list)), self.nodes_list)))\n print('Key Monomer is', self.nodes_list[np.argmax(self.node_weights)])\n \n fig, ax = plt.subplots()\n nx.draw_networkx(\n dgl.to_networkx(self.dgl_graph),\n arrows=False,\n node_size = 300*10**self.node_weights,\n 
node_color = [color_monomer[node] for node in self.nodes_list],\n font_size = 18,\n font_color = 'w',\n font_weight = 'bold',)\n\n plt.axis('off')\n ax.set_xlim([1.2*x for x in ax.get_xlim()])\n ax.set_ylim([1.2*y for y in ax.get_ylim()])\n plt.show()", "def fullgraphplot(time_lower,time_upper):\n\n edges_list,node_list,title_list = graphformation(time_lower,time_upper)\n node_size = []\n for i in range(len(node_list)):\n node_size.append(5)\n g = Network(\n height=\"750px\",\n width=\"100%\",\n bgcolor=\"#222222\",\n font_color=\"white\")\n g.add_nodes(node_list,label=node_list,title=title_list, size= node_size)\n g.add_edges(edges_list)\n g.show(\"nx.html\")\n return", "def plot_graph(self):\n plt.axis(\"off\")\n pos = nx.kamada_kawai_layout(self.graph)\n return nx.draw_networkx(self.graph, pos=pos, node_size=400)", "def draw_graph(self):\r\n G=nx.Graph()\r\n \r\n list_location1 = []\r\n list_location2 = []\r\n list_location3 = []\r\n list_location4 = []\r\n \r\n for citizen in self.citizens:\r\n G.add_node(citizen.id)\r\n if citizen.location == 1:\r\n list_location1.append(citizen.id)\r\n elif citizen.location == 2:\r\n list_location2.append(citizen.id)\r\n elif citizen.location == 3:\r\n list_location3.append(citizen.id)\r\n else: \r\n list_location4.append(citizen.id)\r\n\r\n for citizen in self.citizens:\r\n for friend in citizen.friends:\r\n G.add_edge(citizen.id,friend.id)\r\n\r\n pos = nx.random_layout(G)\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location1, node_color='r')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location2, node_color='g')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location3, node_color='b')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location4, node_color='y')\r\n nx.draw_networkx_edges(G,pos, width=1)\r\n\r\n plt.show()", "def drawGraph(A):\n m,n = A.shape\n labels = {}\n for i in range(n):\n labels[i]=str(i)\n gr = nx.from_numpy_matrix(A.T,create_using=nx.DiGraph())\n nx.draw(gr,arrows=True,node_color='#15b01a',labels=labels)\n plt.show()", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def _build_graph(show=False):\n 
global G\n G = nx.Graph()\n node_labels, edge_labels = {}, {}\n for idx, dep in enumerate(A.deps):\n\n types = [\"dependent\", \"governor\"]\n\n # nodes, labels\n for x in types:\n G.add_node(str(dep[x]), word=dep[x + \"Gloss\"], pos=A.lookup[dep[x]][\"pos\"])\n node_labels[str(dep[x])] = dep[x + \"Gloss\"] + \" : \" + A.lookup[dep[x]][\"pos\"]\n\n # edges, labels\n G.add_edge(str(dep[types[0]]), str(dep[types[1]]), dep=dep[\"dep\"])\n edge_labels[(str(dep[types[0]]), str(dep[types[1]]))] = dep[\"dep\"]\n\n if show == True:\n pos = nx.spring_layout(G)\n nx.draw_networkx(G, pos=pos, labels=node_labels, node_color=\"white\", alpha=.5)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()", "def visualize(self, A):\n G = nx.from_numpy_matrix(np.array(A))\n nx.draw(G, with_labels=True)\n plt.show()\n plt.clf()\n exit(0)", "def plot(model, pos=None, scale=1, figsize=(15, 8), verbose=3):\n out = {}\n G = nx.DiGraph() # Directed graph\n layout='fruchterman_reingold'\n\n # Extract model if in dict\n if 'dict' in str(type(model)):\n model = model.get('model', None)\n\n # Bayesian model\n if 'BayesianModel' in str(type(model)) or 'pgmpy' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on BayesianModel')\n # positions for all nodes\n pos = network.graphlayout(model, pos=pos, scale=scale, layout=layout, verbose=verbose)\n # Add directed edge with weigth\n # edges=model.edges()\n edges=[*model.edges()]\n for i in range(len(edges)):\n G.add_edge(edges[i][0], edges[i][1], weight=1, color='k')\n elif 'networkx' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on networkx model')\n G = model\n pos = network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n else:\n if verbose>=3: print('[bnlearn] >Plot based on adjacency matrix')\n G = network.adjmat2graph(model)\n # Get positions\n pos = network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n\n # Bootup figure\n plt.figure(figsize=figsize)\n # nodes\n nx.draw_networkx_nodes(G, pos, node_size=500, alpha=0.85)\n # edges\n colors = [G[u][v].get('color', 'k') for u, v in G.edges()]\n weights = [G[u][v].get('weight', 1) for u, v in G.edges()]\n nx.draw_networkx_edges(G, pos, arrowstyle='->', edge_color=colors, width=weights)\n # Labels\n nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')\n # Get labels of weights\n # labels = nx.get_edge_attributes(G,'weight')\n # Plot weights\n nx.draw_networkx_edge_labels(G, pos, edge_labels=nx.get_edge_attributes(G, 'weight'))\n # Making figure nice\n ax = plt.gca()\n ax.set_axis_off()\n plt.show()\n\n # Store\n out['pos']=pos\n out['G']=G\n return(out)", "def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')", "def plot_dag(\n self,\n filename,\n traverser,\n node_size=500,\n label_font_size=12,\n text_angle=0,\n image_width=16,\n image_height=12,\n ):\n # map nodes to a color for their operation type\n # https://stackoverflow.com/questions/27030473/how-to-set-colors-for-nodes-in-networkx-python\n color_map = []\n colors = [\"#fbb4ae\", \"#b3cde3\", \"#ccebc5\", \"#decbe4\", \"#fed9a6\"]\n for node in self.G2:\n if self.node_map[node] == OperationType.reader.value:\n color_map.append(colors[0])\n elif self.node_map[node] == OperationType.pipeline.value:\n color_map.append(colors[1])\n elif self.node_map[node] == OperationType.model.value:\n color_map.append(colors[2])\n elif self.node_map[node] == 
OperationType.writer.value:\n color_map.append(colors[3])\n else:\n color_map.append(colors[4])\n\n fig = plt.figure(figsize=(image_width, image_height))\n ax = plt.subplot(111)\n ax.set_title(filename, fontsize=10)\n\n try:\n import pydot\n from networkx.drawing.nx_pydot import graphviz_layout\n except ImportError: # pragma: no cover\n raise ImportError(\n \"This example needs Graphviz and pydot.\"\n \"Please refer to the Plotting requirements in the README\"\n )\n\n # pos = nx.spring_layout(G)\n # pos = nx.circular_layout(G)\n # pos = nx.kamada_kawai_layout(G)\n # pos = nx.shell_layout(G)\n # pos = nx.spectral_layout(G)\n pos = graphviz_layout(self.G2, prog=\"dot\") # , prog='twopi', args='')\n\n nx.draw(\n self.G2,\n pos,\n node_size=node_size,\n node_color=color_map,\n edge_color=\"#939393\",\n font_size=8,\n font_weight=\"bold\",\n )\n # nx.draw_networkx_nodes(G, pos, node_color='b', node_size=500, alpha=0.8)\n\n if len(self.conditional_nodes) > 0:\n cnodes = nx.draw_networkx_nodes(\n self.G2,\n pos,\n node_color=\"#e6b655\",\n node_size=1.5 * node_size,\n alpha=0.8,\n node_shape=\"D\",\n nodelist=list(self.conditional_nodes),\n )\n cnodes.set_edgecolor(\"red\")\n\n # nx.draw_networkx_labels(self.G2,pos, font_size=9)\n\n text = nx.draw_networkx_labels(\n self.G2, pos, font_size=label_font_size\n )\n\n if traverser:\n # map node name to sequence number\n sequence = traverser.traversal_list()\n idx = list(range(1, len(sequence) + 1))\n d = dict(zip(sequence, idx))\n\n # let's plot the sequence numner above the node. How far above it?\n ys = [t._y for _, t in text.items()]\n ysrange = max(ys) - min(ys)\n offset = 0.02 * abs(ysrange)\n\n for _, t in text.items():\n t.set_rotation(text_angle)\n\n if traverser:\n plt.text(t._x, t._y + offset, d[t._text], fontsize=24, color=\"red\")\n\n plt.axis(\"off\")\n plt.tight_layout()\n plt.savefig(filename, format=\"PNG\")\n logging.info(\"Graph written to %s\" % filename)", "def as_networkx_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables])\n\n for r in relations:\n for p in all_pairs([e.name for e in r.dimensions]):\n graph.add_edge(*p)\n return graph", "def draw(self):\n nx.draw_networkx(self.rc)", "def plot_graph(self, graph, subplot=False, axes=None):\n if subplot:\n plt.sca(axes[1, 1])\n axes[1, 1].axis('off')\n else:\n plt.figure(figsize=(5, 5))\n if len(graph.nodes) == 4:\n pos = {(0, 0): [0, 1], (0, 1): [1, 1], (1, 0): [0, 0], (1, 1): [1, 0]}\n else:\n pos = nx.circular_layout(graph)\n nx.draw_networkx_nodes(\n graph, pos, node_size=1800, node_color='w', edgecolors='k')\n nx.draw_networkx_edges(\n graph,\n pos,\n node_size=1800,\n edge_color='k',\n arrowstyle='->',\n arrowsize=10,\n width=3)\n nx.draw_networkx_labels(self.G, pos, {x: x for x in self.V}, font_size=14)", "def PrintGraph(self):\n # print(\"Graph has {} nodes and {} edges.\".format(Node.count, Edge.count))\n # print(\"Unique connected nodes:\")\n # for (a, b) in self.connections:\n # print(\"{},{}\".format(a.index, b.index))\n\n # print(f\"\\nAll edges : {[e.index for e in self.edges]}\")\n\n # print(\"\\nDegree of nodes\")\n\n # for node in self.nodes:\n # print(f\"D of {node.index} = {len(node.neighbours)}\")\n\n for node in self.nodes:\n print(\"{}. 
({}, {})\".format(node.index, node.x, node.y))", "def draw_graph_default(graph):\r\n\r\n nx.draw_networkx(graph, with_labels=True)\r\n plt.show()", "def plot_network(graph, chars = None, show_all = False, set_width = None, output='plot'):\n if chars is not None:\n graph = graph.subgraph(chars)\n\n scaled = scale_edge_weights(graph)\n pos = nx.spring_layout(graph, k =.75 , seed = 1)\n\n #Add edges\n edge_traces, edge_text_trace = make_edges(scaled, pos, graph, show_all, set_width)\n\n #Add nodes\n node_xs = [pos[node][0] for node in scaled.nodes()]\n node_ys = [pos[node][1] for node in scaled.nodes()]\n node_text = ['<b>'+node.capitalize() for node in scaled.nodes()]\n node_hovertext = []\n for node in graph.nodes():\n node_hovertext.append(node.capitalize() + ': '+ str(graph.nodes()[node]['size']) + ' appearances')\n node_trace = go.Scatter(x = node_xs,\n y = node_ys,\n text = node_text,\n textposition = \"bottom center\",\n textfont_size = 14,\n mode = 'markers+text',\n hovertext = node_hovertext,\n hoverinfo = 'text',\n marker = dict(color = 'black',#'#6959CD',\n size = 15,\n line = None))\n layout = go.Layout(paper_bgcolor='rgba(0,0,0,0)',plot_bgcolor='rgba(0,0,0,0)')\n fig = go.Figure(layout = layout)\n\n for trace in edge_traces:\n fig.add_trace(trace)\n fig.add_trace(node_trace)\n fig.add_trace(edge_text_trace)\n\n fig.update_layout(showlegend = False, width = 1000, height = 1200)\n fig.update_xaxes(showticklabels = False)\n fig.update_yaxes(showticklabels = False)\n\n if output == 'plot':\n fig.show()\n elif output == 'return':\n return fig\n elif output == 'save':\n fig.write_image('graph.png')\n else:\n fig.show()", "def create_graph_network_visualization(graph_network, connections, connections_grouped):\n\n edge_trace = go.Scatter(\n x=[],\n y=[],\n customdata=[],\n text=[],\n line=dict(width=2, color='#888'),\n hoverinfo='all',\n mode='lines+text',\n textposition='top left',\n )\n edge_label_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n textposition='top left',\n mode='markers+text',\n hoverinfo='none',\n marker=go.Marker(\n opacity=0\n ),\n textfont=dict(size=20, color='black')\n )\n\n for edge in graph_network.edges():\n x0, y0 = graph_network.node[edge[0]]['pos']\n x1, y1 = graph_network.node[edge[1]]['pos']\n edge_weight = graph_network.node[edge[1]]['pos']\n edge_trace['x'] += tuple([x0, x1, None])\n edge_trace['y'] += tuple([y0, y1, None])\n\n text = graph_network[edge[0]][edge[1]]['weight']\n edge_label_trace['x'] += tuple([(x0 + x1) / 2])\n edge_label_trace['y'] += tuple([(y0 + y1) / 2])\n edge_label_trace['text'] += tuple([text])\n\n # writing to edge customdata\n edge_trace['customdata'] += graph_network[edge[0]][edge[1]]['weight']\n edge_trace['text'] = str(graph_network[edge[0]][edge[1]]['weight'])\n # edge_trace['marker']['size'] += professor_graph[edge[0]][edge[1]]['weight']\n # print(graph_network[edge[0]][edge[1]]['weight'])\n\n node_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n hovertext=[],\n mode=\"markers+text\",\n hoverinfo='text',\n textposition='bottom center',\n marker=dict(\n showscale=False,\n # colorscale options\n # ['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',\n # 'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',\n # 'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis]\n colorscale='YlGnBu',\n reversescale=True,\n color=[],\n size=40,\n colorbar=dict(\n thickness=15,\n title='Node Connections',\n xanchor='left',\n titleside='right'\n ),\n line=dict(width=2))\n )\n\n entry_bool = True\n\n for node in 
graph_network.nodes():\n x, y = graph_network.node[node]['pos']\n node_trace['x'] += tuple([x])\n node_trace['y'] += tuple([y])\n # node_trace['text'].append(node)\n\n # x, y = professor_graph.node[node]['pos']\n # node_trace['x'].append(x)\n # node_trace['y'].append(y)\n\n if entry_bool:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n entry_bool = False\n total_projects = \"Total Projects: {}\".format(len(connections[\"Proposal Number:\"].unique()))\n print(\"Total Projects\", total_projects)\n node_trace['hovertext'] += tuple([total_projects])\n else:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n some_text = []\n some_text.append(node + \"<br>\")\n for i in range(len(connections_grouped.loc[node]['proposal_number'])):\n if i > 0:\n some_text.append(\"<br>\")\n print(\"list index is \", i)\n print(\"prop number is \", connections_grouped.loc[node]['proposal_number'][i])\n some_text.append(connections_grouped.loc[node]['proposal_number'][i])\n # import pdb\n # pdb.set_trace()\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['proposal_title'][i])\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['project_status'][i])\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['institution'][i])\n some_text.append(\"<br>\")\n some_text = [x for x in some_text if str(x) != 'nan']\n\n some_text = \"\".join(some_text)\n print(node)\n print(\"yo is \", some_text)\n # node_trace['hovertext'].append(some_text)\n node_trace['hovertext'] += tuple([some_text])\n\n for node, adjacencies in enumerate(graph_network.adjacency_list()):\n # print(node,adjacencies)\n # print(professor_graph[node])\n node_trace['marker']['color'] += tuple([len(adjacencies)])\n\n return node_trace, edge_trace, edge_label_trace", "def visualize(G, color=None, figsize=(5, 5)):\n plt.figure(figsize=figsize)\n plt.xticks([])\n plt.yticks([])\n nx.draw_networkx(G,\n pos=nx.spring_layout(G, seed=42),\n with_labels=True,\n node_color=color,\n cmap=\"Set2\")\n plt.show();", "def draw_network(graph, users, filename):\n ###TODO-- Completed\n candidate_names = [user['screen_name'] for user in users]\n plt.figure(figsize=(12,12))\n candidate_labels = {node: node if node in candidate_names else '' for node in graph.nodes_iter()}\n #print(candidate_labels)\n nx.draw_networkx(graph, labels=candidate_labels, alpha=0.5, node_color='r', node_size=100, width=0.1)\n #plt.show()\n plt.axis('off')\n plt.savefig(filename)\n #pass", "def draw_network(graph, filename):\n plt.figure(figsize=(12,12))\n nx.draw_networkx(graph, with_labels=False, alpha=.5, width=.1, node_size=100)\n plt.axis(\"off\")\n plt.savefig(filename, format=\"PNG\")", "def plot_system_topology(graph):\n\n plt.figure(figsize=(10,8))\n plt.title('System Topology')\n nx.draw(graph,\n pos=graphviz_layout(graph),\n node_size = [16 * graph.degree(n) for n in graph],\n with_labels = True,\n node_color = 'grey',\n font_size = 10,\n alpha = 0.5\n )", "def plot_graph(station_graph):\n G = nx.DiGraph()\n edge_labels = {graph[0]: graph[1] for graph in station_graph}\n node_labels = {graph[0]: graph[0][1] for graph in station_graph}\n for graph in station_graph:\n G.add_edge(graph[0][0], graph[0][1])\n red_edges = [station_graph[0][0]]\n blue_edges = [edge for edge in G.edges() if edge not in red_edges]\n pos = nx.spring_layout(G)\n nx.draw_networkx_nodes(G, pos, node_color='green', node_size=200)\n nx.draw_networkx_labels(G, pos, node_labels=node_labels)\n 
nx.draw_networkx_edges(G, pos, edgelist=red_edges, edge_color='r', arrows=True)\n nx.draw_networkx_edges(G, pos, edgelist=blue_edges, edge_color='b', arrows=True, arrowsize=10)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()", "def plot(model, pos=None, scale=1, figsize=(15, 8), interactive=False, title='bnlearn causal network', params = {'directed':True, 'height':'800px', 'width':'70%', 'notebook':False, 'layout':None, 'font_color': False, 'bgcolor':'#ffffff'}, verbose=3):\n defaults = {'directed':True, 'height':'800px', 'width':'70%', 'notebook':False, 'heading':title, 'layout':None, 'font_color': False, 'bgcolor':'#ffffff'}\n params = {**defaults, **params}\n\n out = {}\n G = nx.DiGraph() # Directed graph\n layout='fruchterman_reingold'\n\n # Extract model if in dict\n if 'dict' in str(type(model)):\n adjmat = model.get('adjmat', None)\n model = model.get('model', None)\n\n # Bayesian model\n if 'BayesianModel' in str(type(model)) or 'pgmpy' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on BayesianModel')\n # positions for all nodes\n pos = bnlearn.network.graphlayout(model, pos=pos, scale=scale, layout=layout, verbose=verbose)\n # Add directed edge with weigth\n # edges=model.edges()\n edges=[*model.edges()]\n for i in range(len(edges)):\n G.add_edge(edges[i][0], edges[i][1], weight=1, color='k')\n elif 'networkx' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on networkx model')\n G = model\n pos = bnlearn.network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n else:\n if verbose>=3: print('[bnlearn] >Plot based on adjacency matrix')\n G = bnlearn.network.adjmat2graph(adjmat)\n # Get positions\n pos = bnlearn.network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n\n # Make interactive or static plot\n if interactive:\n try:\n from pyvis import network as net\n from IPython.core.display import display, HTML\n # Convert adjacency matrix into Networkx Graph\n G = bnlearn.network.adjmat2graph(adjmat)\n # Setup of the interactive network figure\n g = net.Network(**params)\n # g = net.Network(directed=True, height='800px', width='70%', notebook=False, heading=title)\n g.from_nx(G)\n # Create advanced buttons\n g.show_buttons(filter_=['physics'])\n # Display\n filename = title.strip().replace(' ','_') + '.html'\n g.show(filename)\n display(HTML(filename))\n # webbrowser.open('bnlearn.html')\n except ModuleNotFoundError:\n if verbose>=2: print('[bnlearn] >\"pyvis\" module is not installed. 
Please pip install first: \"pip install pyvis\"')\n else:\n # Bootup figure\n plt.figure(figsize=figsize)\n # nodes\n nx.draw_networkx_nodes(G, pos, node_size=500, alpha=0.85)\n # edges\n colors = [G[u][v].get('color', 'k') for u, v in G.edges()]\n weights = [G[u][v].get('weight', 1) for u, v in G.edges()]\n nx.draw_networkx_edges(G, pos, arrowstyle='->', edge_color=colors, width=weights)\n # Labels\n nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')\n # Get labels of weights\n # labels = nx.get_edge_attributes(G,'weight')\n # Plot weights\n nx.draw_networkx_edge_labels(G, pos, edge_labels=nx.get_edge_attributes(G, 'weight'))\n # Making figure nice\n ax = plt.gca()\n ax.set_axis_off()\n plt.show()\n\n # Store\n out['pos']=pos\n out['G']=G\n return(out)", "def print_graph(dag, image_path, graph_path):\n for node in dag.nodes():\n dag.node[node]['label'] = node.label\n nx.write_graphml(dag, graph_path)\n pos = nx.random_layout(dag)\n nx.draw_networkx(dag, ax=None, width=3, pos=pos)\n p.savefig(image_path)", "def visualize(self):\n dot = Graph()\n \n for k, v in self.vs.items():\n if v.observed:\n dot.node(v.word, style=\"filled\")\n else:\n dot.node(v.word)\n\n for i, (k, v) in enumerate(self.fs.items()):\n dot.node(str(i), shape=\"square\", style=\"bold\")\n s, t = k[1], k[3]\n dot.edge(s, str(i))\n dot.edge(t, str(i))\n \n print dot.source\n #src.render('test-output/holy-grenade.gv', view=True)", "def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"", "def draw_network(G, ds, n = 5, label = False):\n\n top_n = top_n_users(ds,5)\n top_n = [int(i[0]) for i in top_n]\n H = G.subgraph(top_n)\n for m in top_n:\n child = ds[m]\n for item in child:\n H.add_edge(m,item)\n\n print \"Drawing figure...\"\n\n fig = plt.figure()\n nx.draw(H,pos=nx.spring_layout(H), node_size = 1, alpha = 0.25,\n width = 0.25, with_labels = label)\n fig.suptitle('Top 5 nodes by 1st degree connection', fontsize=20)\n# plt.savefig(\"images/TopN.png\", format=\"PNG\")\n plt.show()", "def drawGraph(G, novel_title):\n # Drawing with network x\n page_rank = nx.pagerank(G)\n \n pos = nx.nx_pydot.graphviz_layout(G)\n plt.figure(figsize=(15,10))\n\n font = {'fontsize' : 14}\n plt.title('Character Network for: ' + novel_title, font)\n \n label_pos = {}\n for i in pos:\n label_pos[i] = (pos[i][0] , pos[i][1] - (math.exp(page_rank[i]) * 12))\n \n labels = nx.draw_networkx_labels(G, label_pos, font_weight = 'bold', font_size = 9)\n nodes = nx.draw_networkx_nodes(G, pos, \n node_size = [2000 * page_rank[i] for i in list(nx.nodes(G))],\n node_color = range(len(nx.pagerank(G))),\n cmap = plt.cm.Spectral)\n \n nodes.set_edgecolor('black')\n \n nx.draw_networkx_edges(G, pos, edge_color = 'grey', alpha = .70)\n plt.axis('off')\n plt.savefig('test.png')\n plt.show()", "def draw_graph(grph, edge_labels=True, node_color='#AFAFAF',\n edge_color='#CFCFCF', plot=True, node_size=2000,\n with_labels=True, arrows=True, layout='neato'):\n if type(node_color) is dict:\n node_color = [node_color.get(g, '#AFAFAF') for g in grph.nodes()]\n\n # set drawing options\n options = {\n 'prog': 'dot',\n 'with_labels': with_labels,\n 'node_color': node_color,\n 'edge_color': edge_color,\n 'node_size': node_size,\n 'arrows': arrows\n }\n\n # draw graph\n pos = nx.drawing.nx_agraph.graphviz_layout(grph, prog=layout)\n\n nx.draw(grph, pos=pos, **options)\n\n # add edge labels for all edges\n if edge_labels is True 
and plt:\n labels = nx.get_edge_attributes(grph, 'weight')\n nx.draw_networkx_edge_labels(grph, pos=pos, edge_labels=labels)\n\n # show output\n if plot is True:\n plt.show()", "def show_custom_graph(self):\n pass", "def viz_graph(self, show_ports=False, pydot_options=None):\n import networkx as nx\n G = nx.DiGraph()\n if pydot_options:\n G.graph['graph'] = pydot_options\n # instantiate objects\n for itask in self:\n task_inputs = itask[TaskSpecSchema.inputs]\n to_task = itask[TaskSpecSchema.task_id]\n to_type = itask[TaskSpecSchema.node_type]\n if to_task == \"\":\n to_task = OUTPUT_TYPE\n for iport_or_tid in task_inputs:\n # iport_or_tid: it is either to_port or task id (tid) b/c\n # if using ports API task_inputs is a dictionary otherwise\n # task_inputs is a list.\n taskin_and_oport = task_inputs[iport_or_tid] \\\n if isinstance(task_inputs, dict) else iport_or_tid\n isplit = taskin_and_oport.split('.')\n from_task = isplit[0]\n from_port = isplit[1] if len(isplit) > 1 else None\n if show_ports and from_port is not None:\n to_port = iport_or_tid\n common_tip = taskin_and_oport\n G.add_edge(from_task, common_tip, label=from_port)\n G.add_edge(common_tip, to_task, label=to_port)\n tnode = G.nodes[common_tip]\n tnode.update({\n # 'label': '',\n 'shape': 'point'})\n else:\n G.add_edge(from_task, to_task)\n\n # draw output ports\n if show_ports:\n\n if (to_type == OUTPUT_TYPE):\n continue\n task_node = get_node_obj(itask, tgraph_mixin=True)\n # task_outputs = itask.get(TaskSpecSchema.outputs, [])\n for pout in task_node._get_output_ports():\n out_tip = '{}.{}'.format(\n itask[TaskSpecSchema.task_id], pout)\n G.add_edge(to_task, out_tip, label=pout)\n tnode = G.nodes[out_tip]\n tnode.update({\n # 'label': '',\n 'shape': 'point'})\n return G", "def represent_graph(graph, n):\n \n fig, ax = plt.subplots()\n \n # Graphical representation : \n # each vertex define the absciss\n all_x = graph.keys()\n # ordinate is a random number between 0 and n \n all_y = n*np.random.rand(n)\n\n for vertex in graph:\n # for each vertex in the graph\n # get its coordinate \n x = vertex\n y = all_y[x]\n \n # represent it\n represent_vertex(ax, x, y)\n \n # get its neighbours\n neighbours = Neighbours(graph, vertex)\n \n for neighbour in neighbours :\n # for each neighbour of the vertex\n # draw an array from the vertex to its neighbour\n x_neighbour, y_neighbour = neighbour, all_y[neighbour]\n represent_link(ax, x, y, x_neighbour, y_neighbour)\n \n # Definition of the window\n plt.xlim(0,n)\n plt.ylim(0,n)\n plt.title('Graph')\n \n # Save the picture in Graph.png\n plt.savefig('Graph.png')\n plt.show()\n \n #return the graphical representation used\n return all_x, all_y", "def display_graph(self, color_of_vertex):\r\n import matplotlib.pyplot\r\n import networkx\r\n G = networkx.Graph()\r\n color_set = ['#FF0000', '#32CD32', '#FFD700', '#6B8E23', '#40E0D0', '#BA55D3', '#C0C0C0', '#A0522D', '#6A5ACD']\r\n color_map = []\r\n for k in self.__graph_dict.keys():\r\n G.add_node(k)\r\n for v in self.__graph_dict[k]:\r\n G.add_edge(k,v)\r\n for x in G.node:\r\n color_map.append(color_set[color_of_vertex[x]])\r\n networkx.draw(G, node_color = color_map, with_labels = True)\r\n matplotlib.pyplot.show()", "def show_dag(self, expand=set()):\n from matplotlib.pyplot import show as pltshow\n\n G = self.make_dag(expand=expand)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n ## Plotting\n edge_labels = dict(\n [((u, v,), d[\"label\"]) for u, v, d in G.edges(data=True)]\n )\n n = G.size()\n\n ## Manual 
layout\n # if n == 2:\n if False:\n pos = {\n \"(var)\": [-0.5, +0.5],\n \"(out)\": [+0.5, -0.5],\n }\n pos[self.functions[0].name] = [+0.5, +0.5]\n ## Optimized layout\n else:\n try:\n ## Planar, if possible\n pos = nx.planar_layout(G)\n except nx.NetworkXException:\n ## Scaled spring layout\n pos = nx.spring_layout(\n G,\n k=0.6 * n,\n pos={\n \"(Inputs)\": [-0.5 * n, +0.5 * n],\n \"(Outputs)\": [+0.5 * n, -0.5 * n],\n },\n fixed=[\"(var)\", \"(out)\"],\n threshold=1e-6,\n iterations=100,\n )\n\n # Generate colormap\n color_map = []\n for node in G:\n if G.nodes[node][\"parent\"] == self.name:\n color_map.append(\"blue\")\n else:\n color_map.append(\"green\")\n\n # Draw\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\n nx.draw(G, pos, node_size=1000, with_labels=True, node_color=color_map)\n pltshow()", "def plot_resiliences(nodes, network_vals, er_vals, upa_vals):\n node_vals = range(0, nodes)\n\n plt.plot(node_vals, network_vals, '-b', label='Network')\n plt.plot(node_vals, er_vals, '-r', label='ER')\n plt.plot(node_vals, upa_vals, '-g', label='UPA')\n\n plt.legend(loc='upper right')\n plt.ylabel('Size of Largest Connected Component')\n plt.xlabel('Number of Nodes Removed')\n plt.grid(True)\n plt.title('Comparison of Graph Resilience\\nMeasured by Largest Connected Component vs Nodes Removed by Target Attack\\n')\n plt.show()", "def show_edges(self):\n for element in self.graph:\n print(element, self.graph[element])", "def print_graph() -> None:\n raise NotImplementedError", "def draw_graph(self, node_size=2000, node_color='yellow', edge_color='red'):\n G, node_label_dict = self.make_graph()\n edge_label_dict = {(c.source_name, c.target_name):(c.params.kernel_size) for c in self.layers}\n plt.figure(figsize=(12,12))\n pos = nx.nx_pydot.graphviz_layout(G, prog='dot')\n nx.draw(G, pos, node_size=node_size, node_color=node_color, edge_color=edge_color,alpha=0.4)\n nx.draw_networkx_labels(G, pos, node_label_dict, font_size=10,font_weight=640, alpha=0.7, font_color='black')\n nx.draw_networkx_edge_labels(G, pos, edge_label_dict, font_size=20, font_weight=640,alpha=0.7, font_color='red')\n plt.show()", "def draw_graph(self, out_path):\n # Define layout for network, with increased distance between nodes\n spring_layout = nx.spring_layout(self.graph, k=math.sqrt(self.graph.order()))\n\n # Draw network nodes\n nx.draw_networkx_nodes(self.graph, spring_layout, node_size=10, node_color=\"steelblue\", alpha=0.7)\n # Draw network edges\n nx.draw_networkx_edges(self.graph, spring_layout, width=0.5, alpha=0.3)\n # Draw network labels\n nx.draw_networkx_labels(self.graph, spring_layout, font_size=5, verticalalignment=\"bottom\")\n\n # Save the graph\n plt.savefig(out_path, dpi=300, bbox_inches=\"tight\")", "def visualize_graph(self):\n self._graph.GetVertexData().AddArray(self._labels)\n self._graph.GetEdgeData().AddArray(self._weights)\n colors = vtk.vtkUnsignedCharArray()\n colors.SetNumberOfComponents(1)\n colors.SetName('Colors')\n types = int(245 / len(self._color_dict))\n for c in self._colors:\n colors.InsertNextValue(int(c * types))\n self._graph.GetVertexData().AddArray(colors)\n graphLayoutView = vtk.vtkGraphLayoutView()\n graphLayoutView.AddRepresentationFromInput(self._graph)\n graphLayoutView.SetLayoutStrategy(vtk.vtkSpanTreeLayoutStrategy())\n graphLayoutView.GetLayoutStrategy().SetEdgeWeightField(\"Weights\")\n graphLayoutView.GetLayoutStrategy().SetWeightEdges(1)\n graphLayoutView.GetRenderer().GetActiveCamera().ParallelProjectionOff()\n 
graphLayoutView.SetEdgeLabelArrayName(\"Weights\")\n graphLayoutView.SetEdgeLabelVisibility(1)\n graphLayoutView.SetVertexLabelArrayName('labels')\n graphLayoutView.SetVertexLabelVisibility(1)\n graphLayoutView.SetVertexColorArrayName('Colors')\n graphLayoutView.SetColorVertices(1)\n graphLayoutView.SetInteractorStyle(MouseAndKeysInteractor(graphLayoutView))\n graphLayoutView.ResetCamera()\n graphLayoutView.Render()\n graphLayoutView.GetInteractor().Start()", "def draw_graph(self):\n\t\tif None in self.graph:\n\t\t\tdel self.graph[None]\n\n\t\tfor vs in self.graph.itervalues():\n\t\t\tto_delete = []\n\t\t\tfor i in xrange(len(vs)):\n\t\t\t\tif vs[i] is None:\n\t\t\t\t\tto_delete.append(i)\n\n\t\t\tfor i in reversed(to_delete):\n\t\t\t\tdel vs[i]\n\n\t\tself.G=nx.Graph(self.graph)\n\n\t\tfor k,v in self.labels.iteritems():\n\t\t\tif v[:6] == 'Module':\n\t\t\t\troot = k\n\t\t\t\tbreak\n\n\t\treturn self.__dfs_plot(root)", "def draw_graph(G, pos=None):\n if not pos:\n pos = nx.spring_layout(G)\n\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 10\n fig_size[1] = 8\n\n plt.figure()\n\n nx.draw_networkx_nodes(G, pos)\n nx.draw_networkx_edges(G, pos)\n nx.draw_networkx_labels(G, pos)\n\n for node in G.nodes():\n x, y = pos[node]\n plt.text(x, y + 0.1, \"${}$\".format(latex(G.formula_conj(node))), fontsize=16, horizontalalignment='center')\n \n plt.axis(\"off\")", "def getGraphFigure(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n return fig", "def show_graphs(self):\n self.frequency_plot_graph.show()\n self.resistance_graph.show()\n self.temperature_plot_graph.show()\n self.pressure_plot_graph.show()\n self.humidity_plot_graph.show()\n self.overview_graph.show()\n self.overview_graph.setXRange(-1000, 5000)", "def fig4():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 0, 0],\n [1, 0, 0],\n [1, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n cm = np.array([\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def print_model_graph(self, name=None, agent=([], [], [])):\n dot = pygraphviz.AGraph(directed=\"True\")\n for outp in list(self.outputs.keys()):\n dot.add_node(outp, pos=(outp[1:] + \",10\"), color=\"red\", label=outp + \", \" + str(self.outputs[outp].taking.size) + \"-\" + self.outputs[outp].taking.type)\n for inp in list(self.inputs.keys()):\n dot.add_node(inp, pos=(inp[1:] + \",0\"), color=\"blue\", label=inp + \", \" + str(self.inputs[inp].producing.size) + \"-\" + self.inputs[inp].producing.type)\n for comp in list(self.networks.keys()):\n dot.add_node(comp, label=comp + \"-\" + str(type(self.networks[comp].descriptor).__name__)[:-14] + \":\" + str(self.networks[comp].taking.size) + \"-\" + str(self.networks[comp].producing.size))\n\n for c in self.connections:\n con = self.connections[c]\n if self.conn_in_agent(con, agent[0]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"blue\")\n elif self.conn_in_agent(con, agent[1]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"red\")\n elif self.conn_in_agent(con, agent[2]):\n dot.add_edge(con.input, con.output, label=str(con.name) 
+ \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"green\")\n else:\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"black\")\n dot.layout('dot')\n if not name:\n name = str(hash(self))\n dot.draw(name + '.pdf')", "def visualize(self, filename, options = {'showHead'}):\n\t\tVisualizer.useGraphViz(self, filename, options)", "def plot_spn(root: Node, f: Union[IO, os.PathLike, str]):\n # Convert the SPN to a NetworkX directed graph\n graph = spn_to_digraph(root)\n\n # Build the dictionaries of node labels and colors\n labels = dict()\n colors = dict()\n for node_id in graph.nodes:\n attr = graph.nodes[node_id]\n name = attr['class']\n if name == Sum.__name__:\n label = '+'\n color = '#083d77'\n for child_id, _ in graph.in_edges(node_id):\n idx = graph.edges[child_id, node_id]['idx']\n graph.edges[child_id, node_id]['weight'] = round(attr['weights'][idx], ndigits=2)\n elif name == Product.__name__:\n label = 'x'\n color = '#bf3100'\n else:\n label = repr(attr['scope']).replace(',', '')\n color = '#542188'\n labels[node_id] = label\n colors[node_id] = color\n\n # Compute the nodes positions using PyDot + Graphviz\n pos = nx_pydot.graphviz_layout(graph, prog='dot')\n pos = {node_id: (x, -y) for node_id, (x, y) in pos.items()}\n pos = rescale_layout_dict(pos)\n\n # Set the figure size\n figdim = np.maximum(2, np.sqrt(graph.number_of_nodes() + 2 * graph.number_of_edges()))\n plt.figure(figsize=(figdim, figdim))\n\n # Draw the nodes and edges\n nx.draw_networkx(\n graph, pos=pos, node_color=[colors[node_id] for node_id in graph.nodes],\n labels=labels, arrows=True, font_size=8, font_color='#ffffff'\n )\n nx.draw_networkx_edge_labels(\n graph, pos=pos, edge_labels=nx.get_edge_attributes(graph, 'weight'),\n rotate=False, font_size=8, font_color='#000000'\n )\n\n # Plot the final figure\n plt.tight_layout()\n plt.axis('off')\n plt.savefig(f, bbox_inches='tight', pad_inches=0)\n plt.clf()", "def graph_genotype_GUI(genotype, subp):\n\n\t# create the networkx graph and associated node position for genotype\n\tgraph = genotype.gen_networkx_graph()\n\tpos = genotype.gen_positions_for_networkx(graph)\n\t\n\t# add all nodes into graph with colors\n\tfor node in genotype.nodes:\n\t\tcolor = NODE_TO_COLOR[node.getActKey()]\n\t\tnx.draw_networkx_nodes(graph, pos,\n\t\t\t\t\t\t\t\tax=subp, \n\t\t\t\t\t\t\t\tnodelist=[node.getNodeNum()],\n\t\t\t\t\t\t\t\tnode_color=color,\n\t\t\t\t\t\t\t\tnode_size=400, alpha=0.8)\n\t# add all connections into graph with colors\n\tfor con in genotype.connections:\n\t\tcolor = 'b' if con.getWeight() < 0 else 'r'\n\t\tedge_tuple = (con.getNodeIn().getNodeNum(), \n\t\t \t\t\t\tcon.getNodeOut().getNodeNum())\n\t\tnx.draw_networkx_edges(graph, pos,\n\t\t \t\t\t\t\t\tax=subp,\n\t\t \t\t\t\t\t\tedgelist = [edge_tuple],\n\t\t \t\t\t\t\t\twidth=3, alpha=0.5, \n\t\t \t\t\t\t\t\tedge_color=color, arrows=True)\n\t\t\n\t# add innovation number labels for connections\n\tlabels = nx.get_edge_attributes(graph, 'i')\n\tnx.draw_networkx_edge_labels(graph, pos, ax=subp, labels=labels)\n\n\t# create graph with title/legend and display\n\tplt.title(\"CPPN Genotype Visualization\")\n\tsubp.legend(handles=PATCH_LIST, loc='upper right')", "def draw_graph(graph, start, goal, path=[], save_file=None):\n explored = graph.get_explored_nodes()\n node_pos = {n: graph.nodes[n]['pos'] for n in graph.nodes.keys()}\n edge_labels = {}\n for edge in graph.edges():\n 
edge_labels[edge] = graph[edge[0]][edge[1]]['weight']\n\n labels = {}\n for node in graph:\n labels[node] = node\n\n nx.draw_networkx_nodes(graph, node_pos, node_color='gray') #, nodelist=romania.nodes, node_color='w', node_size=500)\n nx.draw_networkx_edges(graph, node_pos, style='dashed')\n if len(explored) > 0:\n print(\"Explored = \"+str(explored))\n nx.draw_networkx_nodes(graph, node_pos, nodelist=explored, node_color='r')\n\n if len(path) > 0:\n nx.draw_networkx_nodes(graph, node_pos, nodelist= path, node_color='y')\n edgelist = []\n for i in range(1,len(path)):\n edgelist.append((path[i - 1], path[i]))\n nx.draw_networkx_edges(graph, node_pos, edgelist, edge_color='b', width=3)\n nx.draw_networkx_nodes(graph, node_pos, nodelist=[start, goal], node_color='g')\n\n\n\n nx.draw_networkx_labels(graph, node_pos, labels)\n nx.draw_networkx_edge_labels(graph, node_pos, edge_labels, font_size=8)\n\n plt.axis('off')\n plt.show() # display\n if save_file is not None:\n plt.savefig(save_file) # save as png", "def plot_nodes(self, filename, **kwargs):\n\n g = graph.create_nx_graph(self.es, filename=filename, **kwargs)\n\n return g", "def plot_network(path, saveas=None, **kwargs):\n if saveas is None:\n saveas = \"_srcnetwork.html\"\n fn = FileNetwork(path, **kwargs)\n nt = Network(\"1500px\", \"1500px\")\n nt.toggle_physics(True)\n nt.from_nx(fn.network)\n nt.set_options(get_pyvis_options())\n nt.show(f\"{saveas}\")\n return", "def plot(self):\n layout = self.graph.layout(\"kk\")\n bbox = igraph.BoundingBox(600, 600)\n figure = igraph.Plot(bbox=bbox, background=\"white\")\n bbox = bbox.contract(100)\n figure.add(self.graph, layout = layout, bbox=bbox)\n figure.show()", "def showGraph(self):\r\n self.graph_button['state'] = 'disabled'\r\n # Draw connection Graph\r\n self.axGraph.set_visible(True)\r\n nx.draw(self.G, ax=self.axGraph, with_labels=True)\r\n self.canvasPlot.draw()\r\n self.canvasPlot.flush_events()", "def plot_bare_graph(self, show_plot=True, clf: bool = True):\n\n if clf:\n for i in plt.get_fignums():\n if plt.figure(i).get_label()[0:5] == \"(NXG)\":\n plt.close(plt.figure(i).get_label())\n # Close plot with the same name as the one we're creating (if applies)\n for i in plt.get_fignums():\n if plt.figure(i).get_label() == f\"(NXG) GEU {self.catalog}\":\n plt.close(f\"(NXG) GEU {self.catalog}\")\n # Create plot\n plt.figure(f\"(NXG) GEU {self.catalog}\")\n\n # Set node colors by domain\n\n domain_palette = ['#74299E',\n '#235785',\n '#7C1F48',\n '#B48121',\n '#5D6814',\n '#0F5A0F',\n '#818E19',\n '#1818A8',\n '#0300A7']\n colors = {'TRANSPORTE - TX': domain_palette[0],\n 'TRANSPORTE - DX': domain_palette[1],\n 'TX - RADIOENLACES Y SATELITAL': domain_palette[2],\n 'ACCESO - FIJA': domain_palette[3],\n 'ACCESO - MOVIL': domain_palette[4],\n 'CORE VOZ': domain_palette[5],\n 'ENTORNO': domain_palette[6],\n 'CMTS': domain_palette[7],\n 'Other': domain_palette[8]}\n\n # If GEU has many domains, paint each node with its corresponding color\n if self.has_multiple_domains:\n color_map = []\n for node in self.graph.nodes:\n for mat in self.materials:\n # If it finds a match, use object Material to get node's domain\n if mat.catalog == node:\n domain = mat.domain\n color_map.append(colors[domain])\n color_map_in_use = color_map\n # If that's not the case, the only color is the corresponding one\n else:\n try:\n color_map_in_use = colors[self.domain]\n except:\n color_map_in_use = domain_palette[7]\n\n # Plot graph\n nx.draw(self.graph, with_labels=True, node_color=color_map_in_use)\n if 
show_plot:\n plt.show()\n else:\n return plt", "def plot(self):\n return self.graph(edge_labels='words_in_out').plot()", "def plot_graph(graph, labels=None):\n if labels is not None:\n unique_labels = set([v for _, v in labels.items()])\n colors = np.arange(0, 1, 1. / len(unique_labels))\n colors_list = [colors[labels[node]] for node in graph.nodes]\n else:\n colors_list = None\n pos = networkx.spring_layout(graph)\n networkx.draw_networkx_nodes(graph, pos, cmap=plt.get_cmap('jet'), node_color=colors_list,\n node_size=500)\n networkx.draw_networkx_labels(graph, pos)\n networkx.draw_networkx_edges(graph, pos, edgelist=graph.edges, edge_color='r', arrows=True)\n plt.show()", "def output(self):\n\t\t# Sort graph nodes by id\n\t\tnodes = list(self.nodes.values())\n\t\tnodes.sort(key=lambda n:n.id)\n\n\t\tfor n in nodes:\n\t\t\t# Get all edges\n\t\t\tedges = []\n\t\t\tfor edge in n.neighbours:\n\t\t\t\tfor neighbour in n.get_neighbours(edge):\n\t\t\t\t\tedges.append((neighbour.id, edge))\n\t\t\tedges.sort()\n\n\t\t\t# Format edges\n\t\t\tformatted = []\n\t\t\tfor edge in edges:\n\t\t\t\tformatted.append(\"%s:%s\" % (edge[0], edge[1] or \"\"))\n\n\t\t\t# Print format\n\t\t\tprint(\"%s [%s]\" % (n, \", \".join(formatted)))", "def print_graph(self, filename='', save=False):\n nx.draw_circular(self.graph, node_color='pink', node_size=1000, with_labels=True)\n if save:\n plt.savefig(filename)\n print(f'Saved graph as {filename!r}')\n else:\n plt.show()", "def get_graphs(self):\n\n try:\n from keras.utils import plot_model\n from keras.utils.vis_utils import model_to_dot\n\n # from IPython.display import SVG\n\n plot_model(self.model, to_file=\"model.png\")\n plot_model(\n self.latent_to_states_model, to_file=\"latent_to_states_model.png\"\n )\n plot_model(self.batch_model, to_file=\"batch_model.png\")\n if self.mol_to_latent_model is not None:\n plot_model(self.mol_to_latent_model, to_file=\"mol_to_latent_model.png\")\n\n print(\"Models exported to png files.\")\n\n except:\n print(\"Check pydot and graphviz installation.\")", "def draw_relation_graph(database_name, table_name, primary_key, group_name) -> Graph:\n\n nodes = []\n links = []\n disease_list = get_icd_diseasegroup_diseaseinfo(database_name, table_name, primary_key, group_name)[1]\n disease_list = disease_list.split(',')\n # print(disease_list)\n\n for disease in disease_list:\n disease_node = {\n \"name\": disease,\n \"symbolSize\": 50\n }\n\n if disease_node not in nodes:\n nodes.append(disease_node)\n\n gene_list = get_mesh_disease_info(database_name, 'mesh_gene', disease, 'DISEASE_ID')[1]\n gene_list = gene_list.split(',')\n for gene in gene_list:\n gene_node = {\n 'name': gene,\n 'symbolSize': 10\n }\n\n if gene_node not in nodes:\n nodes.append(gene_node)\n\n for gene in gene_list:\n links.append({\"source\": disease, \"target\": gene})\n\n print(nodes)\n print(links)\n\n c = (\n Graph(init_opts=opts.InitOpts(width=\"1440px\", height=\"900px\")).add(\"\", nodes, links, repulsion=3000)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"gene-disease association network\"))\n )\n\n return c", "def draw(self):\n g = self.to_networkx()\n pos = nx.spring_layout(g)\n nx.draw_networkx_edges(g, pos,\n edge_color=EDGE_COLOR,\n width=EDGE_WIDTH)\n obj = nx.draw_networkx_nodes(g, pos, nodelist=self.vs.values(),\n node_size=NODE_SIZE,\n node_color=NODE_COLOR_NORMAL)\n obj.set_linewidth(NODE_BORDER_WIDTH)\n obj.set_edgecolor(NODE_BORDER_COLOR)\n nx.draw_networkx_nodes(g, pos, nodelist=self.fs,\n node_size=FACTOR_NODE_SIZE,\n 
node_color=FACTOR_NODE_COLOR,\n node_shape=FACTOR_NODE_SHAPE)\n nx.draw_networkx_labels(g, pos, {v: v.name\n for v in self.vs.values()},\n font_color=LABEL_COLOR)", "def draw_graph(self, fpath):\n import networkx as nx\n G = self.to_networkx()\n A = nx.nx_agraph.to_agraph(G)\n\n for proc in self.procs.values():\n nbunch = [proc.name]\n nbunch += [iport.absname() for iport in proc.iports.values()]\n nbunch += [oport.absname() for oport in proc.oports.values()]\n A.add_subgraph(\n nbunch, name='cluster_' + proc.name,\n color='lightgray', style='filled', fillcolor='lightgray')\n # color=lightgray;style=filled;fillcolor=lightgray;\n A.layout(prog='dot')\n A.draw(fpath)", "def plot_graph():\n name = request.args.get('instance')\n name = str(name)\n distance = request.args.get('distance')\n path = request.args.get('path')\n if name == 'Custom':\n coords = request.args.get('coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n else:\n nodes = create_nodes(name)\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n axis.set_title(name + \" - Distance: \"+ str(distance))\n path = str(path).split(',')\n path = [int(i) for i in path]\n for i in range(len(path) - 1):\n\n start_node = nodes[path[i]]\n x1, y1 = start_node.x, start_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[i]))\n axis.text(x1,y1, str(path[i]))\n end_node = nodes[path[i+1]]\n x2, y2 = end_node.x, end_node.y\n axis.plot([x1,x2], [y1, y2])\n\n last_node = nodes[path[len(path)-1]]\n x1, y1 = last_node.x, last_node.y\n axis.text(x1,y1, str(path[len(path)-1]))\n\n begin_node = nodes[path[0]]\n x2, y2 = begin_node.x, begin_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[len(path)-1]))\n axis.plot([x1,x2], [y1, y2])\n\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype=\"image/png\")", "def show_graph(edges, vertices, name):\n dot = Digraph(comment=name)\n\n # Add vertices to directed graph\n for v in vertices:\n dot.node(str(v[0]), v[1][\"read\"])\n\n # Add edges to directed graph\n for i, e in enumerate(edges):\n dot.edge(str(e[0]), str(e[1]), label=f\"{str(e[2]['weight'])}: {e[2]['match']}\")\n\n # Render graph and show it in browser\n dot.render(name, view=True)", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def plotStats(stats):\n line_width = 1\n\n font = {'family': 'DejaVu Sans',\n 'weight': 'bold',\n 'size': 10}\n\n matplotlib.rc('font', **font)\n\n plt.figure(1)\n aw = plt.subplot(211)\n aw.set_title(\"Graph grouping nodes by their number of relationships\")\n\n aw.plot(list(stats.keys()), list(stats.values()), 'r', linewidth=line_width)\n aw.grid(True)\n\n plt.xlabel('Number of edges')\n plt.ylabel('Number of nodes')\n plt.savefig(\"edgesStats.pdf\",\n dpi=300, format='pdf', papertype='a0')", "def 
draw_networkx(graph, ax=None, fig=None, nodes=True, edges=True, **kwargs):\n import networkx as nx\n import matplotlib.pyplot as plt\n\n if ax is None:\n fig,ax = plt.subplots(1, 1, figsize=(5,5)) \n else:\n fig = ax.get_figure()\n \n \n # Determine a fine size for nodes\n bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n width, height = bbox.width, bbox.height\n area = width * height * fig.dpi\n \n \n # format nx\n if isinstance(graph, nx.Graph):\n G = graph.copy()\n else:\n G = format_networkx(dict(graph), **kwargs)\n\n # pos\n pos = nx.get_node_attributes(G, \"pos\")\n if pos is None or not len(list(pos)):\n pos = get_layout_pos(G, **kwargs)\n kwargs.update(pos = pos)\n\n \n # draw edges\n if edges is True:\n edge_zorder = kwargs.pop(\"edge_zorder\", kwargs.pop(\"zorder\", None))\n if kwargs.get(\"width\") is None:\n edge_w = [np.sqrt(_) for u,v,_ in G.edges(data='size')]\n kwargs.update(width=edge_w)\n\n if kwargs.get(\"edge_color\") is None:\n edge_c = [_ for u,v,_ in G.edges(data=\"color\")]\n if not any(edge_c):\n edge_c = ['grey' for _ in G.edges()]\n kwargs.update(edge_color=edge_c)\n\n # only pass relevant keyword arguments to nx.draw_networkx_edges\n draw_networkx_edges_kwargs = getfullargspec(nx.draw_networkx_edges).args\n draw_networkx_edges_kwargs = {\n k: kwargs.get(k) for k in draw_networkx_edges_kwargs if k in kwargs\n }\n\n # draw\n edges = nx.draw_networkx_edges(G, ax=ax, **draw_networkx_edges_kwargs)\n if edge_zorder is not None:\n edges.set_zorder(edge_zorder)\n\n \n # draw nodes\n if nodes is True:\n node_s0 = 0.5 * np.pi * area / G.number_of_nodes()\n node_r = np.sqrt(node_s0 / np.pi)\n node_edge = node_r / 3\n node_edge = kwargs.pop(\"node_edge\", node_edge)\n node_edge_color = kwargs.pop(\"node_edge_color\", \"k\") \n node_zorder = kwargs.pop(\"node_zorder\", kwargs.pop(\"zorder\", None))\n\n if kwargs.get(\"node_size\") is None:\n node_s = [node_s0 * np.sqrt(_) for n,_ in G.nodes(data=\"size\")]\n kwargs.update(node_size=node_s)\n\n if kwargs.get(\"node_color\") is None:\n node_c = [_ for n,_ in G.nodes(data=\"color\")]\n if not any(node_c):\n node_c = [_ for n,_ in G.nodes(data=\"group\")]\n kwargs.update(node_color=node_c)\n \n # only pass relevant keyword arguments to nx.draw_networkx_nodes\n draw_networkx_nodes_kwargs = getfullargspec(nx.draw_networkx_nodes).args\n draw_networkx_nodes_kwargs = {\n k: kwargs.get(k) for k in draw_networkx_nodes_kwargs if k in kwargs\n }\n\n # draw\n nodes = nx.draw_networkx_nodes(G, ax=ax, **draw_networkx_nodes_kwargs)\n if node_zorder is not None:\n nodes.set_zorder(node_zorder)\n if node_edge > 0:\n nodes.set_edgecolor(node_edge_color)\n nodes.set_linewidth(node_edge)\n \n # finish\n ax = despine(ax, **kwargs)\n return nodes, edges", "def non_pol_neighbours_graph():\n data = pd.read_csv(\"/Users/emg/GitHub/thesis/output/2019_01/1000_residuals_output_utf8.csv\", index_col=0)\n\n labelled = label_subs(data)\n labelled['resid_rank'] = labelled.resid.rank(pct=True)\n top = subset_df(labelled, 'resid', q=0.95)\n\n edges = top.copy()[['source','target','resid']]\n edges_rev = edges.copy()\n edges_rev.columns = ['target','source','resid']\n directed_edges = pd.concat([edges,edges_rev], sort=True)\n directed_edges['resid_rank'] = directed_edges['resid'].rank(pct=True)\n\n df = label_subs(directed_edges)\n\n pol_subs = load_pol_subs()\n pol_names = pol_subs.subreddit.str.replace('\\\\','')\n pol_subs.subreddit=pol_subs.subreddit.str.replace('\\\\','')\n\n pol_neighbours = 
df[df['source'].isin(pol_names)].sort_values('resid', ascending=False)\n\n top_pol_neigh = pol_neighbours.groupby('source').head(10).sort_values(['source','resid'], ascending=[True,False])\n \n x = top_pol_neigh[~top_pol_neigh.target.isin(pol_names)][['source','target']]\n\n col_dict = pol_subs.set_index('subreddit').col.to_dict()\n for sub in x.target.unique():\n col_dict[sub] = 'gray'\n\n G = nx.from_pandas_edgelist(x)\n nx.set_node_attributes(G, col_dict, 'col')\n\n f = plt.figure(1)\n ax = f.add_subplot(1,1,1)\n\n colors = dict(G.nodes(data='col')).values()\n\n pos = nx.spring_layout(G, k=0.2)\n nx.draw_networkx(G, pos=pos, with_labels=False, node_color=colors, alpha=0.3)\n #nx.draw_networkx_labels(G, pos=pos, with_labels=True)\n\n plt.axis('off')\n f.set_facecolor('w')\n \n f.tight_layout()\n plt.savefig(figures_path(f\"{date}/non_pol_neighbours_graph.png\"))\n plt.close()", "def createGraph(self):\n self.measurements(45,50,10)\n avg = self.readFile(\"avg.pickle\")\n table = []\n for a in avg:\n table.append((a[0], a[1], a[2], a[3], a[4], \"Boolean\"))\n table.append((a[0], a[1], a[2], a[5], a[6], \"Fractional\"))\n table.append((a[0], a[1], a[2], a[7], a[8], \"Hierarchical\"))\n df = pd.DataFrame(table)\n df.columns = [\"nPages\", \"nCentroids\", \"Time\", \"Mean\", \"Std\", \"Type\"]\n print(df)\n sns.set(style = 'darkgrid')\n sns.lmplot(x = \"nCentroids\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.lmplot(x = \"nPages\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.scatterplot(x = \"nCentroids\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n #sns.scatterplot(x = \"nPages\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n plt.show()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def draw_graph(graph, node_positions):\n nx.draw_networkx_nodes(graph, node_positions, node_color=set_colors(graph),\n node_size=50)\n nx.draw_networkx_edges(graph, node_positions, width=0.3, alpha=0.5)", "def printGraph(tree, filename):\n G = pgv.AGraph() #Constructs a graph object\n for key in tree.keys():\n G.add_node(key)\n for subkey in tree[key].keys():\n G.add_node(subkey)\n G.add_edge(key,subkey,label=str(tree[key][subkey]),\\\n len=max(1, tree[key][subkey]))\n #length can't be less than 1, so that labels are readable\n\n G.draw(filename,prog=\"neato\")", "def graph(self, *links):\n\n groups = self._model_terms(links)\n fig, ax = plt.subplots()\n for group in groups:\n for term in group:\n termdata = self._term_data[term]\n termdata.graph(ax)\n\n return ax", "def plot(nodes=None, fig: Optional[plt.Figure] = None, ax=None, view: str = 'L', edge_weights=None, frames=None, edges=None, template=None, network=None,\n edge_color='k', node_size=1, node_color='salmon', node_type='circles', hemisphere='both', highlight_nodes=None, highlight_edges=None, **kwargs):\n # Load default settings, then update with kwargs\n profile = _load_profile(**kwargs)\n if network is not None:\n if nodes is not None or edges is not None:\n raise ValueError('Network keyword arugment is specified along with edges or nodes.')\n elif isinstance(network, nx.Graph):\n nodes, edges, = _from_networkx_input(network, **profile)\n else:\n raise ValueError('Unnown netowrk input')\n\n # Check and load the input of nodes and edges\n nodes, nodeimg, node_colorby, profile['node_columnnames'] = _process_node_input(\n nodes, profile['nodes_df'], node_color, profile['node_columnnames'], template, 
profile['template_voxelsize'])\n edges, edge_weights = _process_edge_input(edges, edge_weights, **profile)\n # Set up legend row\n # TODO compact code into subfunction\n legends = None\n legendrows = 0\n if isinstance(profile['showlegend'], list):\n legends = profile['showlegend']\n legendrows = len(legends)\n elif profile['showlegend'] is True:\n # Only plot size legend is sphere/circle and string or list input\n # TODO setup_legend is a little clunky and could be fixed\n if node_type != 'parcel' and not isinstance(node_size, (float, int)):\n node_sizelegend = profile['node_sizelegend']\n legends = _setup_legend(\n node_size, node_sizelegend, 'node_size', legends)\n # Only plot color legend if colorby\n if node_colorby is not None:\n node_colorlegend = profile['node_colorlegend']\n legends = _setup_legend(\n node_colorby, node_colorlegend, 'node_color', legends)\n if legends is not None:\n legendrows = len(legends)\n\n # Figure setup\n # Get preset views\n if isinstance(view, str):\n if view.startswith('preset'):\n view, hemisphere = _get_presetviews(view)\n # Get number of non-legend rowsnon\n nrows, view, frames = _nrows_in_fig(view, frames)\n\n # if neither title nor subtitles are set, only view name(s) is/are shown\n if profile['subtitles'] == 'auto' and profile['title'] == 'auto':\n profile['subtitles'] = 'auto'\n profile['title'] = None\n # if title is set to None, nothing is shown (view name(s) is/are removed)\n elif profile['title'] is None and profile['subtitles'] == 'auto':\n profile['subtitles'] = None\n\n if type(profile['subtitles']) is list:\n if len(profile['subtitles']) != frames*nrows:\n raise ValueError('Length subtitles must be equal to number of sub-plots')\n\n # Init figure, if not given as input\n if ax is None:\n fig, gridspec = _init_figure(frames, nrows, legendrows)\n else:\n expected_ax_len = (nrows * frames)\n ax, gridspec = _check_axinput(ax, expected_ax_len)\n\n # Set node_color to colorby argument\n if node_colorby is not None:\n node_color = _get_colorby_colors(nodes, node_colorby, **profile)\n if isinstance(edge_color, str) and edges is not None:\n if edge_color in edges:\n edge_color = _get_colorby_colors(edges, edge_color, 'edge', **profile)\n if highlight_nodes is not None and highlight_edges is not None:\n raise ValueError('Cannot highlight based on edges and nodes at the same time.')\n if highlight_nodes is not None:\n node_color, highlight_nodes, profile['node_alpha'] = _highlight_nodes(\n nodes, node_color, highlight_nodes, **profile)\n\n if highlight_edges is not None:\n edges, highlight_edges = _process_highlightedge_input(edges, highlight_edges, **profile)\n edge_color, highlight_edges, profile['edge_alpha'] = _highlight_edges(edges, edge_color, highlight_edges, **profile)\n # Get the nodes that are touched by highlighted edges\n nodes_to_highlight = edges[highlight_edges == 1]\n nodes_to_highlight = np.unique(nodes_to_highlight[profile['edge_columnnames']].values)\n node_color, highlight_nodes, profile['node_alpha'] = _highlight_nodes(\n nodes, node_color, nodes_to_highlight, **profile)\n\n # Rename ax as ax_in and prespecfiy ax_out before forloop\n ax_in = ax\n ax_out = []\n scaled_nodes = False\n # TODO remove double forloop and make single forloop by running over nrows and frames\n # TODO add test for single image across frames and copy axis for speed.\n for ri in range(nrows):\n # Get the azim, elev and arrowaxis for each row\n azim, elev, arrowaxis_row, viewtype = _get_view(\n view[ri], frames, arrowaxis=profile['arrowaxis'])\n for fi in 
range(frames):\n axind = (ri * nrows) + fi\n # get_frame_input allows input arguments to be string or list of different arguments for different plots\n hemi_frame = get_frame_input(hemisphere, axind, ri, fi, nrows, frames)\n subtitle_frame = get_frame_input(profile['subtitles'], axind, ri, fi, nrows, frames)\n template_style_frame = get_frame_input(profile['template_style'], axind, ri, fi, nrows, frames)\n # Set up subplot\n if ax_in is None:\n # Dont use 3d projection for connectivity matrices\n if viewtype[fi] == 'c':\n ax = fig.add_subplot(gridspec[ri, fi])\n else:\n ax = fig.add_subplot(gridspec[ri, fi], projection='3d')\n elif isinstance(ax_in, list):\n # here ax can only be a 1d list, not 2d list.\n ax = ax_in[axind]\n else:\n ax = ax_in\n affine = None\n if template is not None and viewtype[fi]=='b':\n affine = _plot_template(ax, template_style_frame, template,\n hemisphere=hemi_frame,\n azim=azim[fi], elev=elev[fi],\n **profile)\n\n # Template voxels will have origin at 0,0,0\n # It is easier to scale the nodes from the image affine\n # Then to rescale the ax.voxels function\n # So if affine is not None, nodes get scaled in relation to origin and voxelsize,\n # If node coords are derived from nodeimg, this has already been taken care of.\n if nodes is not None and nodeimg is None and viewtype[fi]=='b' and scaled_nodes == False:\n nodes = _scale_nodes(nodes, profile['node_columnnames'], affine)\n scaled_nodes = True\n # nodes and subplot may change for each frame/subplot\n # e.g. if hemisphere is specified\n nodes_frame = None\n if nodes is not None and viewtype[fi]=='b':\n nodes_frame = nodes.copy()\n nodes_frame = _select_single_hemisphere_nodes(\n nodes_frame, profile['node_columnnames'][0], affine, hemi_frame)\n\n if node_type == 'spheres':\n _plot_spheres(ax, nodes_frame, node_color=node_color,\n node_size=node_size, **profile)\n elif node_type == 'circles':\n _plot_nodes(ax, nodes_frame, node_color=node_color,\n node_size=node_size, **profile)\n elif node_type == 'parcels':\n _plot_parcels(ax, nodeimg, cmap=node_color,\n hemisphere=hemi_frame, **profile)\n if edges is not None and viewtype[fi]=='b':\n edges_frame = edges.copy()\n _plot_edges(ax, nodes_frame, edges_frame, edgewidth=edge_weights,\n edge_color=edge_color, highlight_nodes=highlight_nodes, **profile)\n if arrowaxis_row is not None and viewtype[fi]=='b':\n _add_axis_arrows(ax, dims=arrowaxis_row,\n origin=profile['arroworigin'],\n azim=azim[fi], elev=elev[fi], **profile)\n if viewtype[fi] == 's' and nodes is not None and edges is not None:\n _plot_springlayout(ax, nodes=nodes, edges=edges, node_color=node_color, node_size=node_size,\n edge_color=edge_color, edge_weights=edge_weights, highlight_nodes=highlight_nodes, **profile)\n if viewtype[fi] == 'c' and edges is not None:\n _plot_connectivitymatrix(ax, edges=edges, nodes=nodes, node_color=node_color, node_colorby=node_colorby, **profile)\n # Set view angle for 3d projections\n if viewtype[fi] != 'c':\n ax.view_init(azim=azim[fi], elev=elev[fi])\n\n _add_subplot_title(ax, azim[fi], elev[fi], subtitle_frame, hemi_frame, viewtype[fi], **profile)\n _add_title(fig, **profile)\n\n if viewtype[fi] != 'c':\n # Fix the aspect ratio\n ax.set_box_aspect([1, 1, 1])\n _set_axes_equal(ax)\n ax.axis('off')\n # Append ax to ax_out to store it.\n ax_out.append(ax)\n\n # Add legends to plot\n if legends is not None and profile['gif'] is False:\n for li, legend in enumerate(legends):\n # setup legend subplot. 
Goes in centre or centre2 subplots\n spind = gridspec.ncols\n legend_span = profile['legend_span']\n if legend_span is not None:\n if legend_span is int:\n legend_subplotp_colind = legend_span\n else:\n legend_subplotp_colind= slice(legend_span[0], legend_span[1])\n elif np.remainder(spind, 2) == 0:\n # if number of columns is even, center it over the middle two columns\n # by using slice() on the GridSpec.\n legend_subplotp_colind = slice(int((spind / 2) - 1), int(spind / 2) + 1)\n else:\n legend_subplotp_colind = int(np.round(spind / 2) - 1)\n ax = fig.add_subplot(gridspec[nrows + li, legend_subplotp_colind])\n if legend == 'node_size':\n ax = _add_node_size_legend(ax, nodes, node_size, **profile)\n if legend == 'node_color':\n ax = _add_node_color_legend(\n ax, nodes, node_colorby, node_color, **profile)\n ax.axis('off')\n #ax = _add_size_legend(ax, nodes, node_size, node_scale)\n ax_out.append(ax)\n\n # Title on top of the figure\n if profile['title'] is not None:\n _add_title(fig, **profile)\n\n fig.tight_layout()\n\n # If gif is requested, create the gif.\n if profile['gif'] is True:\n _plot_gif(fig, ax_out, profile['gif_duration'], profile['savename'], profile['gif_loop'])\n # Save figure if set\n elif profile['savename'] is not None:\n if profile['savename'].endswith('.png'):\n fig.savefig(profile['savename'], dpi=profile['fig_dpi'])\n elif profile['savename'].endswith('.svg'):\n fig.savefig(profile['savename'], dpi=profile['fig_dpi'])\n else:\n fig.savefig(profile['savename'] + '.png', dpi=profile['fig_dpi'])\n fig.savefig(profile['savename'] + '.svg', dpi=profile['fig_dpi'])\n\n return (fig, ax_out)", "def show(self, output_file=\"ast_viz.pdf\"):\n pos = radial_tree_layout(self.graph, self.graph.vertex(0))\n scale = self.graph.num_vertices()\n\n graph_draw(self.graph, vertex_text=self.graph.vp.type, # self.graph.vertex_index, #\n pos=pos, vertex_font_size=scale,\n output=output_file, output_size=(scale * 200, scale * 200))", "def show_graph(self):\n graph_file = self.dump_graph()\n subprocess.check_output(shlex.split(f'gwenview {graph_file}'))", "def _plot_graph(self) -> None:\n ghg_data, bird_data = self._datasets\n model = self._selection.get_model(ghg_data, bird_data)\n model.plot_data('Percent Change in Bird population (from 1970) vs '\n 'Amount of Greenhouse gas produced in a year',\n 'Amount of Greenhouse gas produced in a year (kt)',\n 'Percent Change in Bird population (from 1970)')", "def plot_graph(self):\r\n A = self.a_grid ; V = self.V1 ; Pol = self.Pol\r\n A_opt = A[Pol.astype(int)]\r\n \r\n fig = plt.subplots(figsize = (8,5))\r\n ax = [None,None]\r\n pltgrid = (1,2)\r\n \r\n ax[0] = plt.subplot2grid(pltgrid, (0,0))\r\n ax[1] = plt.subplot2grid(pltgrid, (0,1))\r\n \r\n ax[0].plot(A[:],V[:,0,0], linewidth = 2, color = 'blue', label = r'$V(a)$: Low $w$')\r\n ax[0].plot(A[:],V[:,0,5], linewidth = 2, color = 'green', label = r'$V(a)$: Median $w$')\r\n ax[0].plot(A[:],V[:,0,-1], linewidth = 2, color = 'red', label = r'$V(a)$: High $w$')\r\n \r\n ax[1].plot(A[:],A_opt[:,0,0], linewidth = 2, color = 'blue', label = r'$a\\'(a)$: Low $w$')\r\n ax[1].plot(A[:],A_opt[:,0,5], linewidth = 2, color = 'green', label = r'$a\\'(a)$: Median $w$')\r\n ax[1].plot(A[:],A_opt[:,0,-1], linewidth = 2, color = 'red', label = r'$a\\'(a)$: High $w$')\r\n ax[1].plot(A[:],A[:], linewidth = 2, color = 'violet', linestyle = 'dashed', zorder = 1)\r\n \r\n \r\n ax[0].set_xlabel(r'$a$') ; ax[0].legend()\r\n ax[1].set_xlabel(r'$a$') ; ax[1].legend()\r\n ax[0].set_title('Value function')\r\n 
ax[1].set_title('Asset policy')\r\n \r\n plt.tight_layout()\r\n plt.show()", "def connect_nodes(self):\n node1 = str(self.form.node1_text.toPlainText())\n node2 = str(self.form.node2_text.toPlainText())\n weight = str(self.form.weight_text.toPlainText())\n self.form.node1_text.clear()\n self.form.node2_text.clear()\n self.form.weight_text.clear()\n\n if not node1 or not node2 or not weight: \n self.show_dialog(\"Empty argument.\")\n return\n \n try:\n weight = int(weight)\n except:\n self.show_dialog(\"Weight should be an integer.\")\n return\n\n if self.G.has_edge(node1, node2):\n self.show_dialog(f\"Edge: {node1, node2} is already constructed.\")\n\n else:\n self.G.add_edge(node1, node2, weight=weight)\n self.form.plot_canvas.plot(self.G)", "def _showConnectionGraph(self):\n self._console_output(\"Creating connect graph...\")\n res = True\n\n u = InfoUI.function_orig_ea\n v = InfoUI.function_dest_ea\n\n cg = self.ba.get_connect_graph(u, v)\n res = self.ba.show_connect_graph(cg)\n\n if not res:\n self._console_output(\n \"[x] No connection between %08x and %08x\" % (u, v),\n err = True)", "def draw_di_graph(graph_object, scale_by_degree=True):\n positions = nx.spring_layout(graph_object)\n if scale_by_degree:\n d = nx.degree(graph_object)\n keys, degrees = zip(*d)\n network = nx.draw(graph_object, nodelist=keys,\n node_size=[5*degree for degree in degrees],\n pos=positions, alpha=0.5, arrows=False)\n else:\n network = nx.draw(graph_object, pos=positions, node_size=50, alpha=0.5)\n # labels = nx.draw_networkx_labels(graph, pos=positions)\n return positions, network, plt.gca()", "def plot_sql(self, table_name=None):\n\t\tlabels={}\n\t\tif table_name in self.DiG:\n\t\t\tsubgraph = self.get_subgraph(table_name)\n\t\t\tfor node in subgraph:\n\t\t\t\tlabels[node] = node\n\t\t\tprint(labels)\n\t\t\tnx.draw_spring(subgraph,labels=labels,font_size=8,node_size=300,alpha=0.7)\n\t\t\tplt.draw()\n\t\t\tplt.show()\n\t\t\treturn True\n\t\tnx.draw_spring(self.DiG,labels=labels,font_size=8,node_size=300,alpha=0.7)\n\t\tplt.draw()\n\t\tplt.show()" ]
[ "0.75382435", "0.7278241", "0.7211741", "0.71995574", "0.70474607", "0.69660336", "0.6962253", "0.68822443", "0.6876223", "0.68665344", "0.685607", "0.6749594", "0.67388505", "0.67268574", "0.6711935", "0.6700764", "0.6693562", "0.6683998", "0.6630944", "0.6603868", "0.6555654", "0.65506417", "0.6545641", "0.6501951", "0.6494064", "0.6486673", "0.6464168", "0.64488745", "0.6447923", "0.64304465", "0.6415688", "0.6403753", "0.6398126", "0.63911945", "0.6385197", "0.6384559", "0.63654834", "0.63628954", "0.63526857", "0.633873", "0.63351053", "0.63297164", "0.631047", "0.6308115", "0.6286033", "0.6284464", "0.6254339", "0.62388074", "0.62382215", "0.6229956", "0.6221604", "0.61831737", "0.6178354", "0.61769956", "0.6148595", "0.6138621", "0.610305", "0.6090716", "0.60849035", "0.6082893", "0.6081486", "0.6072982", "0.60720545", "0.6068062", "0.6040469", "0.60297054", "0.6028974", "0.602871", "0.6024641", "0.60167295", "0.60121477", "0.6009968", "0.5992382", "0.5989696", "0.5987085", "0.59797823", "0.5977288", "0.5966678", "0.5963592", "0.5956104", "0.5942831", "0.5942096", "0.5941181", "0.5928592", "0.59167486", "0.5906567", "0.5900735", "0.59002036", "0.59001243", "0.5898288", "0.5887703", "0.58873457", "0.58750564", "0.5873745", "0.58701354", "0.58605784", "0.5852359", "0.5846851", "0.58458716", "0.5844112" ]
0.8590499
0
Display the variables and relations as a graph, using networkx and matplotlib.
def display_bipartite_graph(variables, relations):
    graph = as_networkx_bipartite_graph(variables, relations)

    # Do not crash if matplotlib is not installed
    try:
        import matplotlib.pyplot as plt

        pos = nx.drawing.spring_layout(graph)
        variables = set(n for n, d in graph.nodes(data=True) if d["bipartite"] == 0)
        factors = set(graph) - variables
        # Node labels are drawn separately below with draw_networkx_labels;
        # draw_networkx_nodes does not accept a with_labels argument.
        nx.draw_networkx_nodes(
            graph,
            pos=pos,
            nodelist=variables,
            node_shape="o",
            node_color="b",
            label="variables",
            alpha=0.5,
        )
        nx.draw_networkx_nodes(
            graph,
            pos=pos,
            nodelist=factors,
            node_shape="s",
            node_color="r",
            label="factors",
            alpha=0.5,
        )
        nx.draw_networkx_labels(graph, pos=pos)
        nx.draw_networkx_edges(graph, pos=pos)
        # nx.draw_random(graph)
        # nx.draw_circular(graph)
        # nx.draw_spectral(graph)
        plt.show()
    except ImportError:
        print("ERROR: cannot display graph, matplotlib is not installed")
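A minimal, self-contained sketch of the same bipartite drawing approach used above. It builds the graph directly with networkx instead of calling as_networkx_bipartite_graph (which is defined elsewhere in the source), and the variable and relation names ("x1", "r1", ...) are invented purely for illustration:

# Illustrative sketch only; node names are made up for the example.
import networkx as nx
import matplotlib.pyplot as plt

graph = nx.Graph()
# bipartite=0 marks variable nodes, bipartite=1 marks factor (relation) nodes
graph.add_nodes_from(["x1", "x2", "x3"], bipartite=0)
graph.add_nodes_from(["r1", "r2"], bipartite=1)
graph.add_edges_from([("x1", "r1"), ("x2", "r1"), ("x2", "r2"), ("x3", "r2")])

pos = nx.drawing.spring_layout(graph)
variables = set(n for n, d in graph.nodes(data=True) if d["bipartite"] == 0)
factors = set(graph) - variables

# Variables as blue circles, factors as red squares; labels drawn separately.
nx.draw_networkx_nodes(graph, pos=pos, nodelist=variables, node_shape="o",
                       node_color="b", label="variables", alpha=0.5)
nx.draw_networkx_nodes(graph, pos=pos, nodelist=factors, node_shape="s",
                       node_color="r", label="factors", alpha=0.5)
nx.draw_networkx_labels(graph, pos=pos)
nx.draw_networkx_edges(graph, pos=pos)
plt.show()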
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_graph(variables, relations):\n graph = as_networkx_graph(variables, relations)\n\n # Do not crash if matplotlib is not installed\n try:\n import matplotlib.pyplot as plt\n\n nx.draw_networkx(graph, with_labels=True)\n # nx.draw_random(graph)\n # nx.draw_circular(graph)\n # nx.draw_spectral(graph)\n plt.show()\n except ImportError:\n print(\"ERROR: cannot display graph, matplotlib is not installed\")", "def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()", "def plot_graph(self) -> None:", "def show_graph(g):\r\n net.draw(g,with_labels= True,font_size=16)\r\n plt.show()", "def visualise(self) -> None:\n nx_graph = nx.DiGraph()\n\n for v in self._vertices:\n if not v.predicate:\n name = v.name.split(\"/\")[-1]\n nx_graph.add_node(name, name=name, pred=v.predicate)\n\n for v in self._vertices:\n if not v.predicate:\n v_name = v.name.split(\"/\")[-1]\n # Neighbors are predicates\n for pred in self.get_neighbors(v):\n pred_name = pred.name.split(\"/\")[-1]\n for obj in self.get_neighbors(pred):\n obj_name = obj.name.split(\"/\")[-1]\n nx_graph.add_edge(v_name, obj_name, name=pred_name)\n\n plt.figure(figsize=(10, 10))\n _pos = nx.circular_layout(nx_graph)\n nx.draw_networkx_nodes(nx_graph, pos=_pos)\n nx.draw_networkx_edges(nx_graph, pos=_pos)\n nx.draw_networkx_labels(nx_graph, pos=_pos)\n names = nx.get_edge_attributes(nx_graph, \"name\")\n nx.draw_networkx_edge_labels(nx_graph, pos=_pos, edge_labels=names)", "def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. 
:)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def network_graph(net_dict=None):\n if net_dict == None:\n net_dict = {}\n else:\n G = nx.from_dict_of_lists(net_dict)\n plt.figure(num=None, figsize=(30, 30), dpi=80, facecolor='w', edgecolor='c')\n nx.draw_networkx(G, with_labels=True, alpha=0.5, edge_color='c', cmap=plt.cm.GnBu)\n plt.savefig(\"metabolism_5years.png\", bbox_inches='tight')", "def showGraph(self, file_name = \"\"):\n \n # prepare edges and weights for visualization\n edges = self.graph.edges()\n weights = [self.graph_data[u]['pheromones'][v] for u,v in edges]\n weights_sum = sum(weights)\n weights = [ (w/weights_sum)*50 for w in weights]\n \n # prepare different shades of red to be used to optionally differentiate\n # between edges with different costs\n # to show more informatiion on the same graph\n colors = []\n max_cost = max([self.graph_data[u]['costs'][v] for u,v in edges])\n for u,v in edges:\n if self.graph_data[u]['costs'][v] <= max_cost/32:\n colors.append('#ff7f7f')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/16:\n colors.append('#ff6666')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/8:\n colors.append('#ff4c4c')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/4:\n colors.append('#ff3232')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/2:\n colors.append('#ff1919')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost:\n colors.append('#ff0000')\n continue\n \n # print the graph \n pos=nx.circular_layout(self.graph)\n nx.draw( 
self.graph,pos=pos,node_size=200,node_color='#A8A8A8', with_labels=True,edges=edges, edge_color=colors,edge_cmap=plt.cm.Blues, width=weights)\n if file_name != \"\":\n path = \"img/\"+file_name\n plt.savefig(path, format=\"PNG\")\n plt.show()", "def visualize_graph(edges_lst):\n G = nx.Graph()\n for edge in edges_lst:\n start = edge[0]\n end = edge[1]\n weight = edge[2]\n G.add_edge(start, end, weight=weight)\n pos = nx.planar_layout(G)\n nx.draw_networkx(G, pos)\n labels = nx.get_edge_attributes(G, 'weight')\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.show()", "def showGraph(G, mate, label=\"\"):\r\n \r\n # Set the positions for all nodes and the figure size\r\n plt.close('all')\r\n plt.figure( figsize=(10, 10) )\r\n pos = nx.graphviz_layout(G, prog='sfdp', args='')\r\n \r\n # Draw the graph with node labels and a title\r\n plt.title(label)\r\n nx.draw(G, pos, node_size=400, with_labels=True)\r\n \r\n # Draw the matched edges\r\n nx.draw_networkx_edges(G, pos, edgelist=mate.items(),\r\n width=5, alpha=0.4, edge_color='b')\r\n \r\n plt.axis('off')\r\n plt.show()", "def graphviz_prettify(self, network):\n graph_settings = {\n 'rankdir': 'LR',\n 'dpi': 60,\n }\n network.graph.update(graph_settings)\n\n for n in network.nodes():\n if isinstance(n, Variable):\n network.nodes[n]['label'] = n.name\n elif isinstance(n, Equation):\n network.nodes[n]['shape'] = 'diamond'", "def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()", "def disp_graph(graph, output_filename):\n dot = Graph(name=\"Graph\", format=\"png\") # instantiate a graph object\n for node in graph.keys(): # add nodes to the graph\n dot.node(str(node))\n for node in graph.keys(): # for every node in the input graph\n # for every other node in the input graph that the first node is connected to\n for other_node in graph[node].keys():\n dot.edge(str(node), str(other_node)) # create the edge\n dot.render(output_filename, view=True) # visualize the graph and save it", "def plot_graph(G):\r\n pos = nx.random_layout(G)\r\n nx.draw(G, pos)\r\n edge_labels = dict([((u, v, ), d['label']) for u, v, d in\r\n G.edges(data=True)])\r\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\r\n nx.draw_networkx_labels(G, pos, labels={i:i for i in G.nodes()},\r\n font_size=16)\r\n plt.show()", "def plotGraph(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n fig.canvas.set_window_title(\"Neural Network\")\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n #plt.savefig(\"autoencoder.svg\", transparent = True)\n plt.show()", "def plot_graph(self, input_graph, NX_GRAPHS):\n self.dgl_graph = input_graph\n self.NX_GRAPHS = NX_GRAPHS\n \n self.get_nodes()\n color_monomer = self.get_colors()\n \n print(dict(zip(range(len(self.nodes_list)), self.nodes_list)))\n print('Key Monomer is', self.nodes_list[np.argmax(self.node_weights)])\n \n fig, ax = plt.subplots()\n nx.draw_networkx(\n dgl.to_networkx(self.dgl_graph),\n arrows=False,\n node_size = 300*10**self.node_weights,\n 
node_color = [color_monomer[node] for node in self.nodes_list],\n font_size = 18,\n font_color = 'w',\n font_weight = 'bold',)\n\n plt.axis('off')\n ax.set_xlim([1.2*x for x in ax.get_xlim()])\n ax.set_ylim([1.2*y for y in ax.get_ylim()])\n plt.show()", "def fullgraphplot(time_lower,time_upper):\n\n edges_list,node_list,title_list = graphformation(time_lower,time_upper)\n node_size = []\n for i in range(len(node_list)):\n node_size.append(5)\n g = Network(\n height=\"750px\",\n width=\"100%\",\n bgcolor=\"#222222\",\n font_color=\"white\")\n g.add_nodes(node_list,label=node_list,title=title_list, size= node_size)\n g.add_edges(edges_list)\n g.show(\"nx.html\")\n return", "def plot_graph(self):\n plt.axis(\"off\")\n pos = nx.kamada_kawai_layout(self.graph)\n return nx.draw_networkx(self.graph, pos=pos, node_size=400)", "def draw_graph(self):\r\n G=nx.Graph()\r\n \r\n list_location1 = []\r\n list_location2 = []\r\n list_location3 = []\r\n list_location4 = []\r\n \r\n for citizen in self.citizens:\r\n G.add_node(citizen.id)\r\n if citizen.location == 1:\r\n list_location1.append(citizen.id)\r\n elif citizen.location == 2:\r\n list_location2.append(citizen.id)\r\n elif citizen.location == 3:\r\n list_location3.append(citizen.id)\r\n else: \r\n list_location4.append(citizen.id)\r\n\r\n for citizen in self.citizens:\r\n for friend in citizen.friends:\r\n G.add_edge(citizen.id,friend.id)\r\n\r\n pos = nx.random_layout(G)\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location1, node_color='r')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location2, node_color='g')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location3, node_color='b')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location4, node_color='y')\r\n nx.draw_networkx_edges(G,pos, width=1)\r\n\r\n plt.show()", "def drawGraph(A):\n m,n = A.shape\n labels = {}\n for i in range(n):\n labels[i]=str(i)\n gr = nx.from_numpy_matrix(A.T,create_using=nx.DiGraph())\n nx.draw(gr,arrows=True,node_color='#15b01a',labels=labels)\n plt.show()", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def _build_graph(show=False):\n 
global G\n G = nx.Graph()\n node_labels, edge_labels = {}, {}\n for idx, dep in enumerate(A.deps):\n\n types = [\"dependent\", \"governor\"]\n\n # nodes, labels\n for x in types:\n G.add_node(str(dep[x]), word=dep[x + \"Gloss\"], pos=A.lookup[dep[x]][\"pos\"])\n node_labels[str(dep[x])] = dep[x + \"Gloss\"] + \" : \" + A.lookup[dep[x]][\"pos\"]\n\n # edges, labels\n G.add_edge(str(dep[types[0]]), str(dep[types[1]]), dep=dep[\"dep\"])\n edge_labels[(str(dep[types[0]]), str(dep[types[1]]))] = dep[\"dep\"]\n\n if show == True:\n pos = nx.spring_layout(G)\n nx.draw_networkx(G, pos=pos, labels=node_labels, node_color=\"white\", alpha=.5)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()", "def visualize(self, A):\n G = nx.from_numpy_matrix(np.array(A))\n nx.draw(G, with_labels=True)\n plt.show()\n plt.clf()\n exit(0)", "def plot(model, pos=None, scale=1, figsize=(15, 8), verbose=3):\n out = {}\n G = nx.DiGraph() # Directed graph\n layout='fruchterman_reingold'\n\n # Extract model if in dict\n if 'dict' in str(type(model)):\n model = model.get('model', None)\n\n # Bayesian model\n if 'BayesianModel' in str(type(model)) or 'pgmpy' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on BayesianModel')\n # positions for all nodes\n pos = network.graphlayout(model, pos=pos, scale=scale, layout=layout, verbose=verbose)\n # Add directed edge with weigth\n # edges=model.edges()\n edges=[*model.edges()]\n for i in range(len(edges)):\n G.add_edge(edges[i][0], edges[i][1], weight=1, color='k')\n elif 'networkx' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on networkx model')\n G = model\n pos = network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n else:\n if verbose>=3: print('[bnlearn] >Plot based on adjacency matrix')\n G = network.adjmat2graph(model)\n # Get positions\n pos = network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n\n # Bootup figure\n plt.figure(figsize=figsize)\n # nodes\n nx.draw_networkx_nodes(G, pos, node_size=500, alpha=0.85)\n # edges\n colors = [G[u][v].get('color', 'k') for u, v in G.edges()]\n weights = [G[u][v].get('weight', 1) for u, v in G.edges()]\n nx.draw_networkx_edges(G, pos, arrowstyle='->', edge_color=colors, width=weights)\n # Labels\n nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')\n # Get labels of weights\n # labels = nx.get_edge_attributes(G,'weight')\n # Plot weights\n nx.draw_networkx_edge_labels(G, pos, edge_labels=nx.get_edge_attributes(G, 'weight'))\n # Making figure nice\n ax = plt.gca()\n ax.set_axis_off()\n plt.show()\n\n # Store\n out['pos']=pos\n out['G']=G\n return(out)", "def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')", "def plot_dag(\n self,\n filename,\n traverser,\n node_size=500,\n label_font_size=12,\n text_angle=0,\n image_width=16,\n image_height=12,\n ):\n # map nodes to a color for their operation type\n # https://stackoverflow.com/questions/27030473/how-to-set-colors-for-nodes-in-networkx-python\n color_map = []\n colors = [\"#fbb4ae\", \"#b3cde3\", \"#ccebc5\", \"#decbe4\", \"#fed9a6\"]\n for node in self.G2:\n if self.node_map[node] == OperationType.reader.value:\n color_map.append(colors[0])\n elif self.node_map[node] == OperationType.pipeline.value:\n color_map.append(colors[1])\n elif self.node_map[node] == OperationType.model.value:\n color_map.append(colors[2])\n elif self.node_map[node] == 
OperationType.writer.value:\n color_map.append(colors[3])\n else:\n color_map.append(colors[4])\n\n fig = plt.figure(figsize=(image_width, image_height))\n ax = plt.subplot(111)\n ax.set_title(filename, fontsize=10)\n\n try:\n import pydot\n from networkx.drawing.nx_pydot import graphviz_layout\n except ImportError: # pragma: no cover\n raise ImportError(\n \"This example needs Graphviz and pydot.\"\n \"Please refer to the Plotting requirements in the README\"\n )\n\n # pos = nx.spring_layout(G)\n # pos = nx.circular_layout(G)\n # pos = nx.kamada_kawai_layout(G)\n # pos = nx.shell_layout(G)\n # pos = nx.spectral_layout(G)\n pos = graphviz_layout(self.G2, prog=\"dot\") # , prog='twopi', args='')\n\n nx.draw(\n self.G2,\n pos,\n node_size=node_size,\n node_color=color_map,\n edge_color=\"#939393\",\n font_size=8,\n font_weight=\"bold\",\n )\n # nx.draw_networkx_nodes(G, pos, node_color='b', node_size=500, alpha=0.8)\n\n if len(self.conditional_nodes) > 0:\n cnodes = nx.draw_networkx_nodes(\n self.G2,\n pos,\n node_color=\"#e6b655\",\n node_size=1.5 * node_size,\n alpha=0.8,\n node_shape=\"D\",\n nodelist=list(self.conditional_nodes),\n )\n cnodes.set_edgecolor(\"red\")\n\n # nx.draw_networkx_labels(self.G2,pos, font_size=9)\n\n text = nx.draw_networkx_labels(\n self.G2, pos, font_size=label_font_size\n )\n\n if traverser:\n # map node name to sequence number\n sequence = traverser.traversal_list()\n idx = list(range(1, len(sequence) + 1))\n d = dict(zip(sequence, idx))\n\n # let's plot the sequence numner above the node. How far above it?\n ys = [t._y for _, t in text.items()]\n ysrange = max(ys) - min(ys)\n offset = 0.02 * abs(ysrange)\n\n for _, t in text.items():\n t.set_rotation(text_angle)\n\n if traverser:\n plt.text(t._x, t._y + offset, d[t._text], fontsize=24, color=\"red\")\n\n plt.axis(\"off\")\n plt.tight_layout()\n plt.savefig(filename, format=\"PNG\")\n logging.info(\"Graph written to %s\" % filename)", "def as_networkx_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables])\n\n for r in relations:\n for p in all_pairs([e.name for e in r.dimensions]):\n graph.add_edge(*p)\n return graph", "def draw(self):\n nx.draw_networkx(self.rc)", "def plot_graph(self, graph, subplot=False, axes=None):\n if subplot:\n plt.sca(axes[1, 1])\n axes[1, 1].axis('off')\n else:\n plt.figure(figsize=(5, 5))\n if len(graph.nodes) == 4:\n pos = {(0, 0): [0, 1], (0, 1): [1, 1], (1, 0): [0, 0], (1, 1): [1, 0]}\n else:\n pos = nx.circular_layout(graph)\n nx.draw_networkx_nodes(\n graph, pos, node_size=1800, node_color='w', edgecolors='k')\n nx.draw_networkx_edges(\n graph,\n pos,\n node_size=1800,\n edge_color='k',\n arrowstyle='->',\n arrowsize=10,\n width=3)\n nx.draw_networkx_labels(self.G, pos, {x: x for x in self.V}, font_size=14)", "def PrintGraph(self):\n # print(\"Graph has {} nodes and {} edges.\".format(Node.count, Edge.count))\n # print(\"Unique connected nodes:\")\n # for (a, b) in self.connections:\n # print(\"{},{}\".format(a.index, b.index))\n\n # print(f\"\\nAll edges : {[e.index for e in self.edges]}\")\n\n # print(\"\\nDegree of nodes\")\n\n # for node in self.nodes:\n # print(f\"D of {node.index} = {len(node.neighbours)}\")\n\n for node in self.nodes:\n print(\"{}. 
({}, {})\".format(node.index, node.x, node.y))", "def draw_graph_default(graph):\r\n\r\n nx.draw_networkx(graph, with_labels=True)\r\n plt.show()", "def plot_network(graph, chars = None, show_all = False, set_width = None, output='plot'):\n if chars is not None:\n graph = graph.subgraph(chars)\n\n scaled = scale_edge_weights(graph)\n pos = nx.spring_layout(graph, k =.75 , seed = 1)\n\n #Add edges\n edge_traces, edge_text_trace = make_edges(scaled, pos, graph, show_all, set_width)\n\n #Add nodes\n node_xs = [pos[node][0] for node in scaled.nodes()]\n node_ys = [pos[node][1] for node in scaled.nodes()]\n node_text = ['<b>'+node.capitalize() for node in scaled.nodes()]\n node_hovertext = []\n for node in graph.nodes():\n node_hovertext.append(node.capitalize() + ': '+ str(graph.nodes()[node]['size']) + ' appearances')\n node_trace = go.Scatter(x = node_xs,\n y = node_ys,\n text = node_text,\n textposition = \"bottom center\",\n textfont_size = 14,\n mode = 'markers+text',\n hovertext = node_hovertext,\n hoverinfo = 'text',\n marker = dict(color = 'black',#'#6959CD',\n size = 15,\n line = None))\n layout = go.Layout(paper_bgcolor='rgba(0,0,0,0)',plot_bgcolor='rgba(0,0,0,0)')\n fig = go.Figure(layout = layout)\n\n for trace in edge_traces:\n fig.add_trace(trace)\n fig.add_trace(node_trace)\n fig.add_trace(edge_text_trace)\n\n fig.update_layout(showlegend = False, width = 1000, height = 1200)\n fig.update_xaxes(showticklabels = False)\n fig.update_yaxes(showticklabels = False)\n\n if output == 'plot':\n fig.show()\n elif output == 'return':\n return fig\n elif output == 'save':\n fig.write_image('graph.png')\n else:\n fig.show()", "def create_graph_network_visualization(graph_network, connections, connections_grouped):\n\n edge_trace = go.Scatter(\n x=[],\n y=[],\n customdata=[],\n text=[],\n line=dict(width=2, color='#888'),\n hoverinfo='all',\n mode='lines+text',\n textposition='top left',\n )\n edge_label_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n textposition='top left',\n mode='markers+text',\n hoverinfo='none',\n marker=go.Marker(\n opacity=0\n ),\n textfont=dict(size=20, color='black')\n )\n\n for edge in graph_network.edges():\n x0, y0 = graph_network.node[edge[0]]['pos']\n x1, y1 = graph_network.node[edge[1]]['pos']\n edge_weight = graph_network.node[edge[1]]['pos']\n edge_trace['x'] += tuple([x0, x1, None])\n edge_trace['y'] += tuple([y0, y1, None])\n\n text = graph_network[edge[0]][edge[1]]['weight']\n edge_label_trace['x'] += tuple([(x0 + x1) / 2])\n edge_label_trace['y'] += tuple([(y0 + y1) / 2])\n edge_label_trace['text'] += tuple([text])\n\n # writing to edge customdata\n edge_trace['customdata'] += graph_network[edge[0]][edge[1]]['weight']\n edge_trace['text'] = str(graph_network[edge[0]][edge[1]]['weight'])\n # edge_trace['marker']['size'] += professor_graph[edge[0]][edge[1]]['weight']\n # print(graph_network[edge[0]][edge[1]]['weight'])\n\n node_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n hovertext=[],\n mode=\"markers+text\",\n hoverinfo='text',\n textposition='bottom center',\n marker=dict(\n showscale=False,\n # colorscale options\n # ['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',\n # 'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',\n # 'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis]\n colorscale='YlGnBu',\n reversescale=True,\n color=[],\n size=40,\n colorbar=dict(\n thickness=15,\n title='Node Connections',\n xanchor='left',\n titleside='right'\n ),\n line=dict(width=2))\n )\n\n entry_bool = True\n\n for node in 
graph_network.nodes():\n x, y = graph_network.node[node]['pos']\n node_trace['x'] += tuple([x])\n node_trace['y'] += tuple([y])\n # node_trace['text'].append(node)\n\n # x, y = professor_graph.node[node]['pos']\n # node_trace['x'].append(x)\n # node_trace['y'].append(y)\n\n if entry_bool:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n entry_bool = False\n total_projects = \"Total Projects: {}\".format(len(connections[\"Proposal Number:\"].unique()))\n print(\"Total Projects\", total_projects)\n node_trace['hovertext'] += tuple([total_projects])\n else:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n some_text = []\n some_text.append(node + \"<br>\")\n for i in range(len(connections_grouped.loc[node]['proposal_number'])):\n if i > 0:\n some_text.append(\"<br>\")\n print(\"list index is \", i)\n print(\"prop number is \", connections_grouped.loc[node]['proposal_number'][i])\n some_text.append(connections_grouped.loc[node]['proposal_number'][i])\n # import pdb\n # pdb.set_trace()\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['proposal_title'][i])\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['project_status'][i])\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['institution'][i])\n some_text.append(\"<br>\")\n some_text = [x for x in some_text if str(x) != 'nan']\n\n some_text = \"\".join(some_text)\n print(node)\n print(\"yo is \", some_text)\n # node_trace['hovertext'].append(some_text)\n node_trace['hovertext'] += tuple([some_text])\n\n for node, adjacencies in enumerate(graph_network.adjacency_list()):\n # print(node,adjacencies)\n # print(professor_graph[node])\n node_trace['marker']['color'] += tuple([len(adjacencies)])\n\n return node_trace, edge_trace, edge_label_trace", "def visualize(G, color=None, figsize=(5, 5)):\n plt.figure(figsize=figsize)\n plt.xticks([])\n plt.yticks([])\n nx.draw_networkx(G,\n pos=nx.spring_layout(G, seed=42),\n with_labels=True,\n node_color=color,\n cmap=\"Set2\")\n plt.show();", "def draw_network(graph, users, filename):\n ###TODO-- Completed\n candidate_names = [user['screen_name'] for user in users]\n plt.figure(figsize=(12,12))\n candidate_labels = {node: node if node in candidate_names else '' for node in graph.nodes_iter()}\n #print(candidate_labels)\n nx.draw_networkx(graph, labels=candidate_labels, alpha=0.5, node_color='r', node_size=100, width=0.1)\n #plt.show()\n plt.axis('off')\n plt.savefig(filename)\n #pass", "def draw_network(graph, filename):\n plt.figure(figsize=(12,12))\n nx.draw_networkx(graph, with_labels=False, alpha=.5, width=.1, node_size=100)\n plt.axis(\"off\")\n plt.savefig(filename, format=\"PNG\")", "def plot_system_topology(graph):\n\n plt.figure(figsize=(10,8))\n plt.title('System Topology')\n nx.draw(graph,\n pos=graphviz_layout(graph),\n node_size = [16 * graph.degree(n) for n in graph],\n with_labels = True,\n node_color = 'grey',\n font_size = 10,\n alpha = 0.5\n )", "def plot_graph(station_graph):\n G = nx.DiGraph()\n edge_labels = {graph[0]: graph[1] for graph in station_graph}\n node_labels = {graph[0]: graph[0][1] for graph in station_graph}\n for graph in station_graph:\n G.add_edge(graph[0][0], graph[0][1])\n red_edges = [station_graph[0][0]]\n blue_edges = [edge for edge in G.edges() if edge not in red_edges]\n pos = nx.spring_layout(G)\n nx.draw_networkx_nodes(G, pos, node_color='green', node_size=200)\n nx.draw_networkx_labels(G, pos, node_labels=node_labels)\n 
nx.draw_networkx_edges(G, pos, edgelist=red_edges, edge_color='r', arrows=True)\n nx.draw_networkx_edges(G, pos, edgelist=blue_edges, edge_color='b', arrows=True, arrowsize=10)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()", "def plot(model, pos=None, scale=1, figsize=(15, 8), interactive=False, title='bnlearn causal network', params = {'directed':True, 'height':'800px', 'width':'70%', 'notebook':False, 'layout':None, 'font_color': False, 'bgcolor':'#ffffff'}, verbose=3):\n defaults = {'directed':True, 'height':'800px', 'width':'70%', 'notebook':False, 'heading':title, 'layout':None, 'font_color': False, 'bgcolor':'#ffffff'}\n params = {**defaults, **params}\n\n out = {}\n G = nx.DiGraph() # Directed graph\n layout='fruchterman_reingold'\n\n # Extract model if in dict\n if 'dict' in str(type(model)):\n adjmat = model.get('adjmat', None)\n model = model.get('model', None)\n\n # Bayesian model\n if 'BayesianModel' in str(type(model)) or 'pgmpy' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on BayesianModel')\n # positions for all nodes\n pos = bnlearn.network.graphlayout(model, pos=pos, scale=scale, layout=layout, verbose=verbose)\n # Add directed edge with weigth\n # edges=model.edges()\n edges=[*model.edges()]\n for i in range(len(edges)):\n G.add_edge(edges[i][0], edges[i][1], weight=1, color='k')\n elif 'networkx' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on networkx model')\n G = model\n pos = bnlearn.network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n else:\n if verbose>=3: print('[bnlearn] >Plot based on adjacency matrix')\n G = bnlearn.network.adjmat2graph(adjmat)\n # Get positions\n pos = bnlearn.network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n\n # Make interactive or static plot\n if interactive:\n try:\n from pyvis import network as net\n from IPython.core.display import display, HTML\n # Convert adjacency matrix into Networkx Graph\n G = bnlearn.network.adjmat2graph(adjmat)\n # Setup of the interactive network figure\n g = net.Network(**params)\n # g = net.Network(directed=True, height='800px', width='70%', notebook=False, heading=title)\n g.from_nx(G)\n # Create advanced buttons\n g.show_buttons(filter_=['physics'])\n # Display\n filename = title.strip().replace(' ','_') + '.html'\n g.show(filename)\n display(HTML(filename))\n # webbrowser.open('bnlearn.html')\n except ModuleNotFoundError:\n if verbose>=2: print('[bnlearn] >\"pyvis\" module is not installed. 
Please pip install first: \"pip install pyvis\"')\n else:\n # Bootup figure\n plt.figure(figsize=figsize)\n # nodes\n nx.draw_networkx_nodes(G, pos, node_size=500, alpha=0.85)\n # edges\n colors = [G[u][v].get('color', 'k') for u, v in G.edges()]\n weights = [G[u][v].get('weight', 1) for u, v in G.edges()]\n nx.draw_networkx_edges(G, pos, arrowstyle='->', edge_color=colors, width=weights)\n # Labels\n nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')\n # Get labels of weights\n # labels = nx.get_edge_attributes(G,'weight')\n # Plot weights\n nx.draw_networkx_edge_labels(G, pos, edge_labels=nx.get_edge_attributes(G, 'weight'))\n # Making figure nice\n ax = plt.gca()\n ax.set_axis_off()\n plt.show()\n\n # Store\n out['pos']=pos\n out['G']=G\n return(out)", "def print_graph(dag, image_path, graph_path):\n for node in dag.nodes():\n dag.node[node]['label'] = node.label\n nx.write_graphml(dag, graph_path)\n pos = nx.random_layout(dag)\n nx.draw_networkx(dag, ax=None, width=3, pos=pos)\n p.savefig(image_path)", "def visualize(self):\n dot = Graph()\n \n for k, v in self.vs.items():\n if v.observed:\n dot.node(v.word, style=\"filled\")\n else:\n dot.node(v.word)\n\n for i, (k, v) in enumerate(self.fs.items()):\n dot.node(str(i), shape=\"square\", style=\"bold\")\n s, t = k[1], k[3]\n dot.edge(s, str(i))\n dot.edge(t, str(i))\n \n print dot.source\n #src.render('test-output/holy-grenade.gv', view=True)", "def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"", "def draw_network(G, ds, n = 5, label = False):\n\n top_n = top_n_users(ds,5)\n top_n = [int(i[0]) for i in top_n]\n H = G.subgraph(top_n)\n for m in top_n:\n child = ds[m]\n for item in child:\n H.add_edge(m,item)\n\n print \"Drawing figure...\"\n\n fig = plt.figure()\n nx.draw(H,pos=nx.spring_layout(H), node_size = 1, alpha = 0.25,\n width = 0.25, with_labels = label)\n fig.suptitle('Top 5 nodes by 1st degree connection', fontsize=20)\n# plt.savefig(\"images/TopN.png\", format=\"PNG\")\n plt.show()", "def drawGraph(G, novel_title):\n # Drawing with network x\n page_rank = nx.pagerank(G)\n \n pos = nx.nx_pydot.graphviz_layout(G)\n plt.figure(figsize=(15,10))\n\n font = {'fontsize' : 14}\n plt.title('Character Network for: ' + novel_title, font)\n \n label_pos = {}\n for i in pos:\n label_pos[i] = (pos[i][0] , pos[i][1] - (math.exp(page_rank[i]) * 12))\n \n labels = nx.draw_networkx_labels(G, label_pos, font_weight = 'bold', font_size = 9)\n nodes = nx.draw_networkx_nodes(G, pos, \n node_size = [2000 * page_rank[i] for i in list(nx.nodes(G))],\n node_color = range(len(nx.pagerank(G))),\n cmap = plt.cm.Spectral)\n \n nodes.set_edgecolor('black')\n \n nx.draw_networkx_edges(G, pos, edge_color = 'grey', alpha = .70)\n plt.axis('off')\n plt.savefig('test.png')\n plt.show()", "def draw_graph(grph, edge_labels=True, node_color='#AFAFAF',\n edge_color='#CFCFCF', plot=True, node_size=2000,\n with_labels=True, arrows=True, layout='neato'):\n if type(node_color) is dict:\n node_color = [node_color.get(g, '#AFAFAF') for g in grph.nodes()]\n\n # set drawing options\n options = {\n 'prog': 'dot',\n 'with_labels': with_labels,\n 'node_color': node_color,\n 'edge_color': edge_color,\n 'node_size': node_size,\n 'arrows': arrows\n }\n\n # draw graph\n pos = nx.drawing.nx_agraph.graphviz_layout(grph, prog=layout)\n\n nx.draw(grph, pos=pos, **options)\n\n # add edge labels for all edges\n if edge_labels is True 
and plt:\n labels = nx.get_edge_attributes(grph, 'weight')\n nx.draw_networkx_edge_labels(grph, pos=pos, edge_labels=labels)\n\n # show output\n if plot is True:\n plt.show()", "def show_custom_graph(self):\n pass", "def viz_graph(self, show_ports=False, pydot_options=None):\n import networkx as nx\n G = nx.DiGraph()\n if pydot_options:\n G.graph['graph'] = pydot_options\n # instantiate objects\n for itask in self:\n task_inputs = itask[TaskSpecSchema.inputs]\n to_task = itask[TaskSpecSchema.task_id]\n to_type = itask[TaskSpecSchema.node_type]\n if to_task == \"\":\n to_task = OUTPUT_TYPE\n for iport_or_tid in task_inputs:\n # iport_or_tid: it is either to_port or task id (tid) b/c\n # if using ports API task_inputs is a dictionary otherwise\n # task_inputs is a list.\n taskin_and_oport = task_inputs[iport_or_tid] \\\n if isinstance(task_inputs, dict) else iport_or_tid\n isplit = taskin_and_oport.split('.')\n from_task = isplit[0]\n from_port = isplit[1] if len(isplit) > 1 else None\n if show_ports and from_port is not None:\n to_port = iport_or_tid\n common_tip = taskin_and_oport\n G.add_edge(from_task, common_tip, label=from_port)\n G.add_edge(common_tip, to_task, label=to_port)\n tnode = G.nodes[common_tip]\n tnode.update({\n # 'label': '',\n 'shape': 'point'})\n else:\n G.add_edge(from_task, to_task)\n\n # draw output ports\n if show_ports:\n\n if (to_type == OUTPUT_TYPE):\n continue\n task_node = get_node_obj(itask, tgraph_mixin=True)\n # task_outputs = itask.get(TaskSpecSchema.outputs, [])\n for pout in task_node._get_output_ports():\n out_tip = '{}.{}'.format(\n itask[TaskSpecSchema.task_id], pout)\n G.add_edge(to_task, out_tip, label=pout)\n tnode = G.nodes[out_tip]\n tnode.update({\n # 'label': '',\n 'shape': 'point'})\n return G", "def represent_graph(graph, n):\n \n fig, ax = plt.subplots()\n \n # Graphical representation : \n # each vertex define the absciss\n all_x = graph.keys()\n # ordinate is a random number between 0 and n \n all_y = n*np.random.rand(n)\n\n for vertex in graph:\n # for each vertex in the graph\n # get its coordinate \n x = vertex\n y = all_y[x]\n \n # represent it\n represent_vertex(ax, x, y)\n \n # get its neighbours\n neighbours = Neighbours(graph, vertex)\n \n for neighbour in neighbours :\n # for each neighbour of the vertex\n # draw an array from the vertex to its neighbour\n x_neighbour, y_neighbour = neighbour, all_y[neighbour]\n represent_link(ax, x, y, x_neighbour, y_neighbour)\n \n # Definition of the window\n plt.xlim(0,n)\n plt.ylim(0,n)\n plt.title('Graph')\n \n # Save the picture in Graph.png\n plt.savefig('Graph.png')\n plt.show()\n \n #return the graphical representation used\n return all_x, all_y", "def display_graph(self, color_of_vertex):\r\n import matplotlib.pyplot\r\n import networkx\r\n G = networkx.Graph()\r\n color_set = ['#FF0000', '#32CD32', '#FFD700', '#6B8E23', '#40E0D0', '#BA55D3', '#C0C0C0', '#A0522D', '#6A5ACD']\r\n color_map = []\r\n for k in self.__graph_dict.keys():\r\n G.add_node(k)\r\n for v in self.__graph_dict[k]:\r\n G.add_edge(k,v)\r\n for x in G.node:\r\n color_map.append(color_set[color_of_vertex[x]])\r\n networkx.draw(G, node_color = color_map, with_labels = True)\r\n matplotlib.pyplot.show()", "def show_dag(self, expand=set()):\n from matplotlib.pyplot import show as pltshow\n\n G = self.make_dag(expand=expand)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n ## Plotting\n edge_labels = dict(\n [((u, v,), d[\"label\"]) for u, v, d in G.edges(data=True)]\n )\n n = G.size()\n\n ## Manual 
layout\n # if n == 2:\n if False:\n pos = {\n \"(var)\": [-0.5, +0.5],\n \"(out)\": [+0.5, -0.5],\n }\n pos[self.functions[0].name] = [+0.5, +0.5]\n ## Optimized layout\n else:\n try:\n ## Planar, if possible\n pos = nx.planar_layout(G)\n except nx.NetworkXException:\n ## Scaled spring layout\n pos = nx.spring_layout(\n G,\n k=0.6 * n,\n pos={\n \"(Inputs)\": [-0.5 * n, +0.5 * n],\n \"(Outputs)\": [+0.5 * n, -0.5 * n],\n },\n fixed=[\"(var)\", \"(out)\"],\n threshold=1e-6,\n iterations=100,\n )\n\n # Generate colormap\n color_map = []\n for node in G:\n if G.nodes[node][\"parent\"] == self.name:\n color_map.append(\"blue\")\n else:\n color_map.append(\"green\")\n\n # Draw\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\n nx.draw(G, pos, node_size=1000, with_labels=True, node_color=color_map)\n pltshow()", "def plot_resiliences(nodes, network_vals, er_vals, upa_vals):\n node_vals = range(0, nodes)\n\n plt.plot(node_vals, network_vals, '-b', label='Network')\n plt.plot(node_vals, er_vals, '-r', label='ER')\n plt.plot(node_vals, upa_vals, '-g', label='UPA')\n\n plt.legend(loc='upper right')\n plt.ylabel('Size of Largest Connected Component')\n plt.xlabel('Number of Nodes Removed')\n plt.grid(True)\n plt.title('Comparison of Graph Resilience\\nMeasured by Largest Connected Component vs Nodes Removed by Target Attack\\n')\n plt.show()", "def show_edges(self):\n for element in self.graph:\n print(element, self.graph[element])", "def print_graph() -> None:\n raise NotImplementedError", "def draw_graph(self, node_size=2000, node_color='yellow', edge_color='red'):\n G, node_label_dict = self.make_graph()\n edge_label_dict = {(c.source_name, c.target_name):(c.params.kernel_size) for c in self.layers}\n plt.figure(figsize=(12,12))\n pos = nx.nx_pydot.graphviz_layout(G, prog='dot')\n nx.draw(G, pos, node_size=node_size, node_color=node_color, edge_color=edge_color,alpha=0.4)\n nx.draw_networkx_labels(G, pos, node_label_dict, font_size=10,font_weight=640, alpha=0.7, font_color='black')\n nx.draw_networkx_edge_labels(G, pos, edge_label_dict, font_size=20, font_weight=640,alpha=0.7, font_color='red')\n plt.show()", "def draw_graph(self, out_path):\n # Define layout for network, with increased distance between nodes\n spring_layout = nx.spring_layout(self.graph, k=math.sqrt(self.graph.order()))\n\n # Draw network nodes\n nx.draw_networkx_nodes(self.graph, spring_layout, node_size=10, node_color=\"steelblue\", alpha=0.7)\n # Draw network edges\n nx.draw_networkx_edges(self.graph, spring_layout, width=0.5, alpha=0.3)\n # Draw network labels\n nx.draw_networkx_labels(self.graph, spring_layout, font_size=5, verticalalignment=\"bottom\")\n\n # Save the graph\n plt.savefig(out_path, dpi=300, bbox_inches=\"tight\")", "def visualize_graph(self):\n self._graph.GetVertexData().AddArray(self._labels)\n self._graph.GetEdgeData().AddArray(self._weights)\n colors = vtk.vtkUnsignedCharArray()\n colors.SetNumberOfComponents(1)\n colors.SetName('Colors')\n types = int(245 / len(self._color_dict))\n for c in self._colors:\n colors.InsertNextValue(int(c * types))\n self._graph.GetVertexData().AddArray(colors)\n graphLayoutView = vtk.vtkGraphLayoutView()\n graphLayoutView.AddRepresentationFromInput(self._graph)\n graphLayoutView.SetLayoutStrategy(vtk.vtkSpanTreeLayoutStrategy())\n graphLayoutView.GetLayoutStrategy().SetEdgeWeightField(\"Weights\")\n graphLayoutView.GetLayoutStrategy().SetWeightEdges(1)\n graphLayoutView.GetRenderer().GetActiveCamera().ParallelProjectionOff()\n 
graphLayoutView.SetEdgeLabelArrayName(\"Weights\")\n graphLayoutView.SetEdgeLabelVisibility(1)\n graphLayoutView.SetVertexLabelArrayName('labels')\n graphLayoutView.SetVertexLabelVisibility(1)\n graphLayoutView.SetVertexColorArrayName('Colors')\n graphLayoutView.SetColorVertices(1)\n graphLayoutView.SetInteractorStyle(MouseAndKeysInteractor(graphLayoutView))\n graphLayoutView.ResetCamera()\n graphLayoutView.Render()\n graphLayoutView.GetInteractor().Start()", "def draw_graph(self):\n\t\tif None in self.graph:\n\t\t\tdel self.graph[None]\n\n\t\tfor vs in self.graph.itervalues():\n\t\t\tto_delete = []\n\t\t\tfor i in xrange(len(vs)):\n\t\t\t\tif vs[i] is None:\n\t\t\t\t\tto_delete.append(i)\n\n\t\t\tfor i in reversed(to_delete):\n\t\t\t\tdel vs[i]\n\n\t\tself.G=nx.Graph(self.graph)\n\n\t\tfor k,v in self.labels.iteritems():\n\t\t\tif v[:6] == 'Module':\n\t\t\t\troot = k\n\t\t\t\tbreak\n\n\t\treturn self.__dfs_plot(root)", "def draw_graph(G, pos=None):\n if not pos:\n pos = nx.spring_layout(G)\n\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 10\n fig_size[1] = 8\n\n plt.figure()\n\n nx.draw_networkx_nodes(G, pos)\n nx.draw_networkx_edges(G, pos)\n nx.draw_networkx_labels(G, pos)\n\n for node in G.nodes():\n x, y = pos[node]\n plt.text(x, y + 0.1, \"${}$\".format(latex(G.formula_conj(node))), fontsize=16, horizontalalignment='center')\n \n plt.axis(\"off\")", "def getGraphFigure(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n return fig", "def show_graphs(self):\n self.frequency_plot_graph.show()\n self.resistance_graph.show()\n self.temperature_plot_graph.show()\n self.pressure_plot_graph.show()\n self.humidity_plot_graph.show()\n self.overview_graph.show()\n self.overview_graph.setXRange(-1000, 5000)", "def fig4():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 0, 0],\n [1, 0, 0],\n [1, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n cm = np.array([\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def print_model_graph(self, name=None, agent=([], [], [])):\n dot = pygraphviz.AGraph(directed=\"True\")\n for outp in list(self.outputs.keys()):\n dot.add_node(outp, pos=(outp[1:] + \",10\"), color=\"red\", label=outp + \", \" + str(self.outputs[outp].taking.size) + \"-\" + self.outputs[outp].taking.type)\n for inp in list(self.inputs.keys()):\n dot.add_node(inp, pos=(inp[1:] + \",0\"), color=\"blue\", label=inp + \", \" + str(self.inputs[inp].producing.size) + \"-\" + self.inputs[inp].producing.type)\n for comp in list(self.networks.keys()):\n dot.add_node(comp, label=comp + \"-\" + str(type(self.networks[comp].descriptor).__name__)[:-14] + \":\" + str(self.networks[comp].taking.size) + \"-\" + str(self.networks[comp].producing.size))\n\n for c in self.connections:\n con = self.connections[c]\n if self.conn_in_agent(con, agent[0]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"blue\")\n elif self.conn_in_agent(con, agent[1]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"red\")\n elif self.conn_in_agent(con, agent[2]):\n dot.add_edge(con.input, con.output, label=str(con.name) 
+ \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"green\")\n else:\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"black\")\n dot.layout('dot')\n if not name:\n name = str(hash(self))\n dot.draw(name + '.pdf')", "def visualize(self, filename, options = {'showHead'}):\n\t\tVisualizer.useGraphViz(self, filename, options)", "def plot_spn(root: Node, f: Union[IO, os.PathLike, str]):\n # Convert the SPN to a NetworkX directed graph\n graph = spn_to_digraph(root)\n\n # Build the dictionaries of node labels and colors\n labels = dict()\n colors = dict()\n for node_id in graph.nodes:\n attr = graph.nodes[node_id]\n name = attr['class']\n if name == Sum.__name__:\n label = '+'\n color = '#083d77'\n for child_id, _ in graph.in_edges(node_id):\n idx = graph.edges[child_id, node_id]['idx']\n graph.edges[child_id, node_id]['weight'] = round(attr['weights'][idx], ndigits=2)\n elif name == Product.__name__:\n label = 'x'\n color = '#bf3100'\n else:\n label = repr(attr['scope']).replace(',', '')\n color = '#542188'\n labels[node_id] = label\n colors[node_id] = color\n\n # Compute the nodes positions using PyDot + Graphviz\n pos = nx_pydot.graphviz_layout(graph, prog='dot')\n pos = {node_id: (x, -y) for node_id, (x, y) in pos.items()}\n pos = rescale_layout_dict(pos)\n\n # Set the figure size\n figdim = np.maximum(2, np.sqrt(graph.number_of_nodes() + 2 * graph.number_of_edges()))\n plt.figure(figsize=(figdim, figdim))\n\n # Draw the nodes and edges\n nx.draw_networkx(\n graph, pos=pos, node_color=[colors[node_id] for node_id in graph.nodes],\n labels=labels, arrows=True, font_size=8, font_color='#ffffff'\n )\n nx.draw_networkx_edge_labels(\n graph, pos=pos, edge_labels=nx.get_edge_attributes(graph, 'weight'),\n rotate=False, font_size=8, font_color='#000000'\n )\n\n # Plot the final figure\n plt.tight_layout()\n plt.axis('off')\n plt.savefig(f, bbox_inches='tight', pad_inches=0)\n plt.clf()", "def graph_genotype_GUI(genotype, subp):\n\n\t# create the networkx graph and associated node position for genotype\n\tgraph = genotype.gen_networkx_graph()\n\tpos = genotype.gen_positions_for_networkx(graph)\n\t\n\t# add all nodes into graph with colors\n\tfor node in genotype.nodes:\n\t\tcolor = NODE_TO_COLOR[node.getActKey()]\n\t\tnx.draw_networkx_nodes(graph, pos,\n\t\t\t\t\t\t\t\tax=subp, \n\t\t\t\t\t\t\t\tnodelist=[node.getNodeNum()],\n\t\t\t\t\t\t\t\tnode_color=color,\n\t\t\t\t\t\t\t\tnode_size=400, alpha=0.8)\n\t# add all connections into graph with colors\n\tfor con in genotype.connections:\n\t\tcolor = 'b' if con.getWeight() < 0 else 'r'\n\t\tedge_tuple = (con.getNodeIn().getNodeNum(), \n\t\t \t\t\t\tcon.getNodeOut().getNodeNum())\n\t\tnx.draw_networkx_edges(graph, pos,\n\t\t \t\t\t\t\t\tax=subp,\n\t\t \t\t\t\t\t\tedgelist = [edge_tuple],\n\t\t \t\t\t\t\t\twidth=3, alpha=0.5, \n\t\t \t\t\t\t\t\tedge_color=color, arrows=True)\n\t\t\n\t# add innovation number labels for connections\n\tlabels = nx.get_edge_attributes(graph, 'i')\n\tnx.draw_networkx_edge_labels(graph, pos, ax=subp, labels=labels)\n\n\t# create graph with title/legend and display\n\tplt.title(\"CPPN Genotype Visualization\")\n\tsubp.legend(handles=PATCH_LIST, loc='upper right')", "def draw_graph(graph, start, goal, path=[], save_file=None):\n explored = graph.get_explored_nodes()\n node_pos = {n: graph.nodes[n]['pos'] for n in graph.nodes.keys()}\n edge_labels = {}\n for edge in graph.edges():\n 
edge_labels[edge] = graph[edge[0]][edge[1]]['weight']\n\n labels = {}\n for node in graph:\n labels[node] = node\n\n nx.draw_networkx_nodes(graph, node_pos, node_color='gray') #, nodelist=romania.nodes, node_color='w', node_size=500)\n nx.draw_networkx_edges(graph, node_pos, style='dashed')\n if len(explored) > 0:\n print(\"Explored = \"+str(explored))\n nx.draw_networkx_nodes(graph, node_pos, nodelist=explored, node_color='r')\n\n if len(path) > 0:\n nx.draw_networkx_nodes(graph, node_pos, nodelist= path, node_color='y')\n edgelist = []\n for i in range(1,len(path)):\n edgelist.append((path[i - 1], path[i]))\n nx.draw_networkx_edges(graph, node_pos, edgelist, edge_color='b', width=3)\n nx.draw_networkx_nodes(graph, node_pos, nodelist=[start, goal], node_color='g')\n\n\n\n nx.draw_networkx_labels(graph, node_pos, labels)\n nx.draw_networkx_edge_labels(graph, node_pos, edge_labels, font_size=8)\n\n plt.axis('off')\n plt.show() # display\n if save_file is not None:\n plt.savefig(save_file) # save as png", "def plot_nodes(self, filename, **kwargs):\n\n g = graph.create_nx_graph(self.es, filename=filename, **kwargs)\n\n return g", "def plot_network(path, saveas=None, **kwargs):\n if saveas is None:\n saveas = \"_srcnetwork.html\"\n fn = FileNetwork(path, **kwargs)\n nt = Network(\"1500px\", \"1500px\")\n nt.toggle_physics(True)\n nt.from_nx(fn.network)\n nt.set_options(get_pyvis_options())\n nt.show(f\"{saveas}\")\n return", "def plot(self):\n layout = self.graph.layout(\"kk\")\n bbox = igraph.BoundingBox(600, 600)\n figure = igraph.Plot(bbox=bbox, background=\"white\")\n bbox = bbox.contract(100)\n figure.add(self.graph, layout = layout, bbox=bbox)\n figure.show()", "def showGraph(self):\r\n self.graph_button['state'] = 'disabled'\r\n # Draw connection Graph\r\n self.axGraph.set_visible(True)\r\n nx.draw(self.G, ax=self.axGraph, with_labels=True)\r\n self.canvasPlot.draw()\r\n self.canvasPlot.flush_events()", "def plot_bare_graph(self, show_plot=True, clf: bool = True):\n\n if clf:\n for i in plt.get_fignums():\n if plt.figure(i).get_label()[0:5] == \"(NXG)\":\n plt.close(plt.figure(i).get_label())\n # Close plot with the same name as the one we're creating (if applies)\n for i in plt.get_fignums():\n if plt.figure(i).get_label() == f\"(NXG) GEU {self.catalog}\":\n plt.close(f\"(NXG) GEU {self.catalog}\")\n # Create plot\n plt.figure(f\"(NXG) GEU {self.catalog}\")\n\n # Set node colors by domain\n\n domain_palette = ['#74299E',\n '#235785',\n '#7C1F48',\n '#B48121',\n '#5D6814',\n '#0F5A0F',\n '#818E19',\n '#1818A8',\n '#0300A7']\n colors = {'TRANSPORTE - TX': domain_palette[0],\n 'TRANSPORTE - DX': domain_palette[1],\n 'TX - RADIOENLACES Y SATELITAL': domain_palette[2],\n 'ACCESO - FIJA': domain_palette[3],\n 'ACCESO - MOVIL': domain_palette[4],\n 'CORE VOZ': domain_palette[5],\n 'ENTORNO': domain_palette[6],\n 'CMTS': domain_palette[7],\n 'Other': domain_palette[8]}\n\n # If GEU has many domains, paint each node with its corresponding color\n if self.has_multiple_domains:\n color_map = []\n for node in self.graph.nodes:\n for mat in self.materials:\n # If it finds a match, use object Material to get node's domain\n if mat.catalog == node:\n domain = mat.domain\n color_map.append(colors[domain])\n color_map_in_use = color_map\n # If that's not the case, the only color is the corresponding one\n else:\n try:\n color_map_in_use = colors[self.domain]\n except:\n color_map_in_use = domain_palette[7]\n\n # Plot graph\n nx.draw(self.graph, with_labels=True, node_color=color_map_in_use)\n if 
show_plot:\n plt.show()\n else:\n return plt", "def plot(self):\n return self.graph(edge_labels='words_in_out').plot()", "def plot_graph(graph, labels=None):\n if labels is not None:\n unique_labels = set([v for _, v in labels.items()])\n colors = np.arange(0, 1, 1. / len(unique_labels))\n colors_list = [colors[labels[node]] for node in graph.nodes]\n else:\n colors_list = None\n pos = networkx.spring_layout(graph)\n networkx.draw_networkx_nodes(graph, pos, cmap=plt.get_cmap('jet'), node_color=colors_list,\n node_size=500)\n networkx.draw_networkx_labels(graph, pos)\n networkx.draw_networkx_edges(graph, pos, edgelist=graph.edges, edge_color='r', arrows=True)\n plt.show()", "def output(self):\n\t\t# Sort graph nodes by id\n\t\tnodes = list(self.nodes.values())\n\t\tnodes.sort(key=lambda n:n.id)\n\n\t\tfor n in nodes:\n\t\t\t# Get all edges\n\t\t\tedges = []\n\t\t\tfor edge in n.neighbours:\n\t\t\t\tfor neighbour in n.get_neighbours(edge):\n\t\t\t\t\tedges.append((neighbour.id, edge))\n\t\t\tedges.sort()\n\n\t\t\t# Format edges\n\t\t\tformatted = []\n\t\t\tfor edge in edges:\n\t\t\t\tformatted.append(\"%s:%s\" % (edge[0], edge[1] or \"\"))\n\n\t\t\t# Print format\n\t\t\tprint(\"%s [%s]\" % (n, \", \".join(formatted)))", "def print_graph(self, filename='', save=False):\n nx.draw_circular(self.graph, node_color='pink', node_size=1000, with_labels=True)\n if save:\n plt.savefig(filename)\n print(f'Saved graph as {filename!r}')\n else:\n plt.show()", "def get_graphs(self):\n\n try:\n from keras.utils import plot_model\n from keras.utils.vis_utils import model_to_dot\n\n # from IPython.display import SVG\n\n plot_model(self.model, to_file=\"model.png\")\n plot_model(\n self.latent_to_states_model, to_file=\"latent_to_states_model.png\"\n )\n plot_model(self.batch_model, to_file=\"batch_model.png\")\n if self.mol_to_latent_model is not None:\n plot_model(self.mol_to_latent_model, to_file=\"mol_to_latent_model.png\")\n\n print(\"Models exported to png files.\")\n\n except:\n print(\"Check pydot and graphviz installation.\")", "def draw_relation_graph(database_name, table_name, primary_key, group_name) -> Graph:\n\n nodes = []\n links = []\n disease_list = get_icd_diseasegroup_diseaseinfo(database_name, table_name, primary_key, group_name)[1]\n disease_list = disease_list.split(',')\n # print(disease_list)\n\n for disease in disease_list:\n disease_node = {\n \"name\": disease,\n \"symbolSize\": 50\n }\n\n if disease_node not in nodes:\n nodes.append(disease_node)\n\n gene_list = get_mesh_disease_info(database_name, 'mesh_gene', disease, 'DISEASE_ID')[1]\n gene_list = gene_list.split(',')\n for gene in gene_list:\n gene_node = {\n 'name': gene,\n 'symbolSize': 10\n }\n\n if gene_node not in nodes:\n nodes.append(gene_node)\n\n for gene in gene_list:\n links.append({\"source\": disease, \"target\": gene})\n\n print(nodes)\n print(links)\n\n c = (\n Graph(init_opts=opts.InitOpts(width=\"1440px\", height=\"900px\")).add(\"\", nodes, links, repulsion=3000)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"gene-disease association network\"))\n )\n\n return c", "def draw(self):\n g = self.to_networkx()\n pos = nx.spring_layout(g)\n nx.draw_networkx_edges(g, pos,\n edge_color=EDGE_COLOR,\n width=EDGE_WIDTH)\n obj = nx.draw_networkx_nodes(g, pos, nodelist=self.vs.values(),\n node_size=NODE_SIZE,\n node_color=NODE_COLOR_NORMAL)\n obj.set_linewidth(NODE_BORDER_WIDTH)\n obj.set_edgecolor(NODE_BORDER_COLOR)\n nx.draw_networkx_nodes(g, pos, nodelist=self.fs,\n node_size=FACTOR_NODE_SIZE,\n 
node_color=FACTOR_NODE_COLOR,\n node_shape=FACTOR_NODE_SHAPE)\n nx.draw_networkx_labels(g, pos, {v: v.name\n for v in self.vs.values()},\n font_color=LABEL_COLOR)", "def draw_graph(self, fpath):\n import networkx as nx\n G = self.to_networkx()\n A = nx.nx_agraph.to_agraph(G)\n\n for proc in self.procs.values():\n nbunch = [proc.name]\n nbunch += [iport.absname() for iport in proc.iports.values()]\n nbunch += [oport.absname() for oport in proc.oports.values()]\n A.add_subgraph(\n nbunch, name='cluster_' + proc.name,\n color='lightgray', style='filled', fillcolor='lightgray')\n # color=lightgray;style=filled;fillcolor=lightgray;\n A.layout(prog='dot')\n A.draw(fpath)", "def plot_graph():\n name = request.args.get('instance')\n name = str(name)\n distance = request.args.get('distance')\n path = request.args.get('path')\n if name == 'Custom':\n coords = request.args.get('coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n else:\n nodes = create_nodes(name)\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n axis.set_title(name + \" - Distance: \"+ str(distance))\n path = str(path).split(',')\n path = [int(i) for i in path]\n for i in range(len(path) - 1):\n\n start_node = nodes[path[i]]\n x1, y1 = start_node.x, start_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[i]))\n axis.text(x1,y1, str(path[i]))\n end_node = nodes[path[i+1]]\n x2, y2 = end_node.x, end_node.y\n axis.plot([x1,x2], [y1, y2])\n\n last_node = nodes[path[len(path)-1]]\n x1, y1 = last_node.x, last_node.y\n axis.text(x1,y1, str(path[len(path)-1]))\n\n begin_node = nodes[path[0]]\n x2, y2 = begin_node.x, begin_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[len(path)-1]))\n axis.plot([x1,x2], [y1, y2])\n\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype=\"image/png\")", "def show_graph(edges, vertices, name):\n dot = Digraph(comment=name)\n\n # Add vertices to directed graph\n for v in vertices:\n dot.node(str(v[0]), v[1][\"read\"])\n\n # Add edges to directed graph\n for i, e in enumerate(edges):\n dot.edge(str(e[0]), str(e[1]), label=f\"{str(e[2]['weight'])}: {e[2]['match']}\")\n\n # Render graph and show it in browser\n dot.render(name, view=True)", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def plotStats(stats):\n line_width = 1\n\n font = {'family': 'DejaVu Sans',\n 'weight': 'bold',\n 'size': 10}\n\n matplotlib.rc('font', **font)\n\n plt.figure(1)\n aw = plt.subplot(211)\n aw.set_title(\"Graph grouping nodes by their number of relationships\")\n\n aw.plot(list(stats.keys()), list(stats.values()), 'r', linewidth=line_width)\n aw.grid(True)\n\n plt.xlabel('Number of edges')\n plt.ylabel('Number of nodes')\n plt.savefig(\"edgesStats.pdf\",\n dpi=300, format='pdf', papertype='a0')", "def 
draw_networkx(graph, ax=None, fig=None, nodes=True, edges=True, **kwargs):\n import networkx as nx\n import matplotlib.pyplot as plt\n\n if ax is None:\n fig,ax = plt.subplots(1, 1, figsize=(5,5)) \n else:\n fig = ax.get_figure()\n \n \n # Determine a fine size for nodes\n bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n width, height = bbox.width, bbox.height\n area = width * height * fig.dpi\n \n \n # format nx\n if isinstance(graph, nx.Graph):\n G = graph.copy()\n else:\n G = format_networkx(dict(graph), **kwargs)\n\n # pos\n pos = nx.get_node_attributes(G, \"pos\")\n if pos is None or not len(list(pos)):\n pos = get_layout_pos(G, **kwargs)\n kwargs.update(pos = pos)\n\n \n # draw edges\n if edges is True:\n edge_zorder = kwargs.pop(\"edge_zorder\", kwargs.pop(\"zorder\", None))\n if kwargs.get(\"width\") is None:\n edge_w = [np.sqrt(_) for u,v,_ in G.edges(data='size')]\n kwargs.update(width=edge_w)\n\n if kwargs.get(\"edge_color\") is None:\n edge_c = [_ for u,v,_ in G.edges(data=\"color\")]\n if not any(edge_c):\n edge_c = ['grey' for _ in G.edges()]\n kwargs.update(edge_color=edge_c)\n\n # only pass relevant keyword arguments to nx.draw_networkx_edges\n draw_networkx_edges_kwargs = getfullargspec(nx.draw_networkx_edges).args\n draw_networkx_edges_kwargs = {\n k: kwargs.get(k) for k in draw_networkx_edges_kwargs if k in kwargs\n }\n\n # draw\n edges = nx.draw_networkx_edges(G, ax=ax, **draw_networkx_edges_kwargs)\n if edge_zorder is not None:\n edges.set_zorder(edge_zorder)\n\n \n # draw nodes\n if nodes is True:\n node_s0 = 0.5 * np.pi * area / G.number_of_nodes()\n node_r = np.sqrt(node_s0 / np.pi)\n node_edge = node_r / 3\n node_edge = kwargs.pop(\"node_edge\", node_edge)\n node_edge_color = kwargs.pop(\"node_edge_color\", \"k\") \n node_zorder = kwargs.pop(\"node_zorder\", kwargs.pop(\"zorder\", None))\n\n if kwargs.get(\"node_size\") is None:\n node_s = [node_s0 * np.sqrt(_) for n,_ in G.nodes(data=\"size\")]\n kwargs.update(node_size=node_s)\n\n if kwargs.get(\"node_color\") is None:\n node_c = [_ for n,_ in G.nodes(data=\"color\")]\n if not any(node_c):\n node_c = [_ for n,_ in G.nodes(data=\"group\")]\n kwargs.update(node_color=node_c)\n \n # only pass relevant keyword arguments to nx.draw_networkx_nodes\n draw_networkx_nodes_kwargs = getfullargspec(nx.draw_networkx_nodes).args\n draw_networkx_nodes_kwargs = {\n k: kwargs.get(k) for k in draw_networkx_nodes_kwargs if k in kwargs\n }\n\n # draw\n nodes = nx.draw_networkx_nodes(G, ax=ax, **draw_networkx_nodes_kwargs)\n if node_zorder is not None:\n nodes.set_zorder(node_zorder)\n if node_edge > 0:\n nodes.set_edgecolor(node_edge_color)\n nodes.set_linewidth(node_edge)\n \n # finish\n ax = despine(ax, **kwargs)\n return nodes, edges", "def non_pol_neighbours_graph():\n data = pd.read_csv(\"/Users/emg/GitHub/thesis/output/2019_01/1000_residuals_output_utf8.csv\", index_col=0)\n\n labelled = label_subs(data)\n labelled['resid_rank'] = labelled.resid.rank(pct=True)\n top = subset_df(labelled, 'resid', q=0.95)\n\n edges = top.copy()[['source','target','resid']]\n edges_rev = edges.copy()\n edges_rev.columns = ['target','source','resid']\n directed_edges = pd.concat([edges,edges_rev], sort=True)\n directed_edges['resid_rank'] = directed_edges['resid'].rank(pct=True)\n\n df = label_subs(directed_edges)\n\n pol_subs = load_pol_subs()\n pol_names = pol_subs.subreddit.str.replace('\\\\','')\n pol_subs.subreddit=pol_subs.subreddit.str.replace('\\\\','')\n\n pol_neighbours = 
df[df['source'].isin(pol_names)].sort_values('resid', ascending=False)\n\n top_pol_neigh = pol_neighbours.groupby('source').head(10).sort_values(['source','resid'], ascending=[True,False])\n \n x = top_pol_neigh[~top_pol_neigh.target.isin(pol_names)][['source','target']]\n\n col_dict = pol_subs.set_index('subreddit').col.to_dict()\n for sub in x.target.unique():\n col_dict[sub] = 'gray'\n\n G = nx.from_pandas_edgelist(x)\n nx.set_node_attributes(G, col_dict, 'col')\n\n f = plt.figure(1)\n ax = f.add_subplot(1,1,1)\n\n colors = dict(G.nodes(data='col')).values()\n\n pos = nx.spring_layout(G, k=0.2)\n nx.draw_networkx(G, pos=pos, with_labels=False, node_color=colors, alpha=0.3)\n #nx.draw_networkx_labels(G, pos=pos, with_labels=True)\n\n plt.axis('off')\n f.set_facecolor('w')\n \n f.tight_layout()\n plt.savefig(figures_path(f\"{date}/non_pol_neighbours_graph.png\"))\n plt.close()", "def createGraph(self):\n self.measurements(45,50,10)\n avg = self.readFile(\"avg.pickle\")\n table = []\n for a in avg:\n table.append((a[0], a[1], a[2], a[3], a[4], \"Boolean\"))\n table.append((a[0], a[1], a[2], a[5], a[6], \"Fractional\"))\n table.append((a[0], a[1], a[2], a[7], a[8], \"Hierarchical\"))\n df = pd.DataFrame(table)\n df.columns = [\"nPages\", \"nCentroids\", \"Time\", \"Mean\", \"Std\", \"Type\"]\n print(df)\n sns.set(style = 'darkgrid')\n sns.lmplot(x = \"nCentroids\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.lmplot(x = \"nPages\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.scatterplot(x = \"nCentroids\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n #sns.scatterplot(x = \"nPages\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n plt.show()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def draw_graph(graph, node_positions):\n nx.draw_networkx_nodes(graph, node_positions, node_color=set_colors(graph),\n node_size=50)\n nx.draw_networkx_edges(graph, node_positions, width=0.3, alpha=0.5)", "def printGraph(tree, filename):\n G = pgv.AGraph() #Constructs a graph object\n for key in tree.keys():\n G.add_node(key)\n for subkey in tree[key].keys():\n G.add_node(subkey)\n G.add_edge(key,subkey,label=str(tree[key][subkey]),\\\n len=max(1, tree[key][subkey]))\n #length can't be less than 1, so that labels are readable\n\n G.draw(filename,prog=\"neato\")", "def graph(self, *links):\n\n groups = self._model_terms(links)\n fig, ax = plt.subplots()\n for group in groups:\n for term in group:\n termdata = self._term_data[term]\n termdata.graph(ax)\n\n return ax", "def plot(nodes=None, fig: Optional[plt.Figure] = None, ax=None, view: str = 'L', edge_weights=None, frames=None, edges=None, template=None, network=None,\n edge_color='k', node_size=1, node_color='salmon', node_type='circles', hemisphere='both', highlight_nodes=None, highlight_edges=None, **kwargs):\n # Load default settings, then update with kwargs\n profile = _load_profile(**kwargs)\n if network is not None:\n if nodes is not None or edges is not None:\n raise ValueError('Network keyword arugment is specified along with edges or nodes.')\n elif isinstance(network, nx.Graph):\n nodes, edges, = _from_networkx_input(network, **profile)\n else:\n raise ValueError('Unnown netowrk input')\n\n # Check and load the input of nodes and edges\n nodes, nodeimg, node_colorby, profile['node_columnnames'] = _process_node_input(\n nodes, profile['nodes_df'], node_color, profile['node_columnnames'], template, 
profile['template_voxelsize'])\n edges, edge_weights = _process_edge_input(edges, edge_weights, **profile)\n # Set up legend row\n # TODO compact code into subfunction\n legends = None\n legendrows = 0\n if isinstance(profile['showlegend'], list):\n legends = profile['showlegend']\n legendrows = len(legends)\n elif profile['showlegend'] is True:\n # Only plot size legend is sphere/circle and string or list input\n # TODO setup_legend is a little clunky and could be fixed\n if node_type != 'parcel' and not isinstance(node_size, (float, int)):\n node_sizelegend = profile['node_sizelegend']\n legends = _setup_legend(\n node_size, node_sizelegend, 'node_size', legends)\n # Only plot color legend if colorby\n if node_colorby is not None:\n node_colorlegend = profile['node_colorlegend']\n legends = _setup_legend(\n node_colorby, node_colorlegend, 'node_color', legends)\n if legends is not None:\n legendrows = len(legends)\n\n # Figure setup\n # Get preset views\n if isinstance(view, str):\n if view.startswith('preset'):\n view, hemisphere = _get_presetviews(view)\n # Get number of non-legend rowsnon\n nrows, view, frames = _nrows_in_fig(view, frames)\n\n # if neither title nor subtitles are set, only view name(s) is/are shown\n if profile['subtitles'] == 'auto' and profile['title'] == 'auto':\n profile['subtitles'] = 'auto'\n profile['title'] = None\n # if title is set to None, nothing is shown (view name(s) is/are removed)\n elif profile['title'] is None and profile['subtitles'] == 'auto':\n profile['subtitles'] = None\n\n if type(profile['subtitles']) is list:\n if len(profile['subtitles']) != frames*nrows:\n raise ValueError('Length subtitles must be equal to number of sub-plots')\n\n # Init figure, if not given as input\n if ax is None:\n fig, gridspec = _init_figure(frames, nrows, legendrows)\n else:\n expected_ax_len = (nrows * frames)\n ax, gridspec = _check_axinput(ax, expected_ax_len)\n\n # Set node_color to colorby argument\n if node_colorby is not None:\n node_color = _get_colorby_colors(nodes, node_colorby, **profile)\n if isinstance(edge_color, str) and edges is not None:\n if edge_color in edges:\n edge_color = _get_colorby_colors(edges, edge_color, 'edge', **profile)\n if highlight_nodes is not None and highlight_edges is not None:\n raise ValueError('Cannot highlight based on edges and nodes at the same time.')\n if highlight_nodes is not None:\n node_color, highlight_nodes, profile['node_alpha'] = _highlight_nodes(\n nodes, node_color, highlight_nodes, **profile)\n\n if highlight_edges is not None:\n edges, highlight_edges = _process_highlightedge_input(edges, highlight_edges, **profile)\n edge_color, highlight_edges, profile['edge_alpha'] = _highlight_edges(edges, edge_color, highlight_edges, **profile)\n # Get the nodes that are touched by highlighted edges\n nodes_to_highlight = edges[highlight_edges == 1]\n nodes_to_highlight = np.unique(nodes_to_highlight[profile['edge_columnnames']].values)\n node_color, highlight_nodes, profile['node_alpha'] = _highlight_nodes(\n nodes, node_color, nodes_to_highlight, **profile)\n\n # Rename ax as ax_in and prespecfiy ax_out before forloop\n ax_in = ax\n ax_out = []\n scaled_nodes = False\n # TODO remove double forloop and make single forloop by running over nrows and frames\n # TODO add test for single image across frames and copy axis for speed.\n for ri in range(nrows):\n # Get the azim, elev and arrowaxis for each row\n azim, elev, arrowaxis_row, viewtype = _get_view(\n view[ri], frames, arrowaxis=profile['arrowaxis'])\n for fi in 
range(frames):\n axind = (ri * nrows) + fi\n # get_frame_input allows input arguments to be string or list of different arguments for different plots\n hemi_frame = get_frame_input(hemisphere, axind, ri, fi, nrows, frames)\n subtitle_frame = get_frame_input(profile['subtitles'], axind, ri, fi, nrows, frames)\n template_style_frame = get_frame_input(profile['template_style'], axind, ri, fi, nrows, frames)\n # Set up subplot\n if ax_in is None:\n # Dont use 3d projection for connectivity matrices\n if viewtype[fi] == 'c':\n ax = fig.add_subplot(gridspec[ri, fi])\n else:\n ax = fig.add_subplot(gridspec[ri, fi], projection='3d')\n elif isinstance(ax_in, list):\n # here ax can only be a 1d list, not 2d list.\n ax = ax_in[axind]\n else:\n ax = ax_in\n affine = None\n if template is not None and viewtype[fi]=='b':\n affine = _plot_template(ax, template_style_frame, template,\n hemisphere=hemi_frame,\n azim=azim[fi], elev=elev[fi],\n **profile)\n\n # Template voxels will have origin at 0,0,0\n # It is easier to scale the nodes from the image affine\n # Then to rescale the ax.voxels function\n # So if affine is not None, nodes get scaled in relation to origin and voxelsize,\n # If node coords are derived from nodeimg, this has already been taken care of.\n if nodes is not None and nodeimg is None and viewtype[fi]=='b' and scaled_nodes == False:\n nodes = _scale_nodes(nodes, profile['node_columnnames'], affine)\n scaled_nodes = True\n # nodes and subplot may change for each frame/subplot\n # e.g. if hemisphere is specified\n nodes_frame = None\n if nodes is not None and viewtype[fi]=='b':\n nodes_frame = nodes.copy()\n nodes_frame = _select_single_hemisphere_nodes(\n nodes_frame, profile['node_columnnames'][0], affine, hemi_frame)\n\n if node_type == 'spheres':\n _plot_spheres(ax, nodes_frame, node_color=node_color,\n node_size=node_size, **profile)\n elif node_type == 'circles':\n _plot_nodes(ax, nodes_frame, node_color=node_color,\n node_size=node_size, **profile)\n elif node_type == 'parcels':\n _plot_parcels(ax, nodeimg, cmap=node_color,\n hemisphere=hemi_frame, **profile)\n if edges is not None and viewtype[fi]=='b':\n edges_frame = edges.copy()\n _plot_edges(ax, nodes_frame, edges_frame, edgewidth=edge_weights,\n edge_color=edge_color, highlight_nodes=highlight_nodes, **profile)\n if arrowaxis_row is not None and viewtype[fi]=='b':\n _add_axis_arrows(ax, dims=arrowaxis_row,\n origin=profile['arroworigin'],\n azim=azim[fi], elev=elev[fi], **profile)\n if viewtype[fi] == 's' and nodes is not None and edges is not None:\n _plot_springlayout(ax, nodes=nodes, edges=edges, node_color=node_color, node_size=node_size,\n edge_color=edge_color, edge_weights=edge_weights, highlight_nodes=highlight_nodes, **profile)\n if viewtype[fi] == 'c' and edges is not None:\n _plot_connectivitymatrix(ax, edges=edges, nodes=nodes, node_color=node_color, node_colorby=node_colorby, **profile)\n # Set view angle for 3d projections\n if viewtype[fi] != 'c':\n ax.view_init(azim=azim[fi], elev=elev[fi])\n\n _add_subplot_title(ax, azim[fi], elev[fi], subtitle_frame, hemi_frame, viewtype[fi], **profile)\n _add_title(fig, **profile)\n\n if viewtype[fi] != 'c':\n # Fix the aspect ratio\n ax.set_box_aspect([1, 1, 1])\n _set_axes_equal(ax)\n ax.axis('off')\n # Append ax to ax_out to store it.\n ax_out.append(ax)\n\n # Add legends to plot\n if legends is not None and profile['gif'] is False:\n for li, legend in enumerate(legends):\n # setup legend subplot. 
Goes in centre or centre2 subplots\n spind = gridspec.ncols\n legend_span = profile['legend_span']\n if legend_span is not None:\n if legend_span is int:\n legend_subplotp_colind = legend_span\n else:\n legend_subplotp_colind= slice(legend_span[0], legend_span[1])\n elif np.remainder(spind, 2) == 0:\n # if number of columns is even, center it over the middle two columns\n # by using slice() on the GridSpec.\n legend_subplotp_colind = slice(int((spind / 2) - 1), int(spind / 2) + 1)\n else:\n legend_subplotp_colind = int(np.round(spind / 2) - 1)\n ax = fig.add_subplot(gridspec[nrows + li, legend_subplotp_colind])\n if legend == 'node_size':\n ax = _add_node_size_legend(ax, nodes, node_size, **profile)\n if legend == 'node_color':\n ax = _add_node_color_legend(\n ax, nodes, node_colorby, node_color, **profile)\n ax.axis('off')\n #ax = _add_size_legend(ax, nodes, node_size, node_scale)\n ax_out.append(ax)\n\n # Title on top of the figure\n if profile['title'] is not None:\n _add_title(fig, **profile)\n\n fig.tight_layout()\n\n # If gif is requested, create the gif.\n if profile['gif'] is True:\n _plot_gif(fig, ax_out, profile['gif_duration'], profile['savename'], profile['gif_loop'])\n # Save figure if set\n elif profile['savename'] is not None:\n if profile['savename'].endswith('.png'):\n fig.savefig(profile['savename'], dpi=profile['fig_dpi'])\n elif profile['savename'].endswith('.svg'):\n fig.savefig(profile['savename'], dpi=profile['fig_dpi'])\n else:\n fig.savefig(profile['savename'] + '.png', dpi=profile['fig_dpi'])\n fig.savefig(profile['savename'] + '.svg', dpi=profile['fig_dpi'])\n\n return (fig, ax_out)", "def show(self, output_file=\"ast_viz.pdf\"):\n pos = radial_tree_layout(self.graph, self.graph.vertex(0))\n scale = self.graph.num_vertices()\n\n graph_draw(self.graph, vertex_text=self.graph.vp.type, # self.graph.vertex_index, #\n pos=pos, vertex_font_size=scale,\n output=output_file, output_size=(scale * 200, scale * 200))", "def show_graph(self):\n graph_file = self.dump_graph()\n subprocess.check_output(shlex.split(f'gwenview {graph_file}'))", "def _plot_graph(self) -> None:\n ghg_data, bird_data = self._datasets\n model = self._selection.get_model(ghg_data, bird_data)\n model.plot_data('Percent Change in Bird population (from 1970) vs '\n 'Amount of Greenhouse gas produced in a year',\n 'Amount of Greenhouse gas produced in a year (kt)',\n 'Percent Change in Bird population (from 1970)')", "def plot_graph(self):\r\n A = self.a_grid ; V = self.V1 ; Pol = self.Pol\r\n A_opt = A[Pol.astype(int)]\r\n \r\n fig = plt.subplots(figsize = (8,5))\r\n ax = [None,None]\r\n pltgrid = (1,2)\r\n \r\n ax[0] = plt.subplot2grid(pltgrid, (0,0))\r\n ax[1] = plt.subplot2grid(pltgrid, (0,1))\r\n \r\n ax[0].plot(A[:],V[:,0,0], linewidth = 2, color = 'blue', label = r'$V(a)$: Low $w$')\r\n ax[0].plot(A[:],V[:,0,5], linewidth = 2, color = 'green', label = r'$V(a)$: Median $w$')\r\n ax[0].plot(A[:],V[:,0,-1], linewidth = 2, color = 'red', label = r'$V(a)$: High $w$')\r\n \r\n ax[1].plot(A[:],A_opt[:,0,0], linewidth = 2, color = 'blue', label = r'$a\\'(a)$: Low $w$')\r\n ax[1].plot(A[:],A_opt[:,0,5], linewidth = 2, color = 'green', label = r'$a\\'(a)$: Median $w$')\r\n ax[1].plot(A[:],A_opt[:,0,-1], linewidth = 2, color = 'red', label = r'$a\\'(a)$: High $w$')\r\n ax[1].plot(A[:],A[:], linewidth = 2, color = 'violet', linestyle = 'dashed', zorder = 1)\r\n \r\n \r\n ax[0].set_xlabel(r'$a$') ; ax[0].legend()\r\n ax[1].set_xlabel(r'$a$') ; ax[1].legend()\r\n ax[0].set_title('Value function')\r\n 
ax[1].set_title('Asset policy')\r\n \r\n plt.tight_layout()\r\n plt.show()", "def connect_nodes(self):\n node1 = str(self.form.node1_text.toPlainText())\n node2 = str(self.form.node2_text.toPlainText())\n weight = str(self.form.weight_text.toPlainText())\n self.form.node1_text.clear()\n self.form.node2_text.clear()\n self.form.weight_text.clear()\n\n if not node1 or not node2 or not weight: \n self.show_dialog(\"Empty argument.\")\n return\n \n try:\n weight = int(weight)\n except:\n self.show_dialog(\"Weight should be an integer.\")\n return\n\n if self.G.has_edge(node1, node2):\n self.show_dialog(f\"Edge: {node1, node2} is already constructed.\")\n\n else:\n self.G.add_edge(node1, node2, weight=weight)\n self.form.plot_canvas.plot(self.G)", "def _showConnectionGraph(self):\n self._console_output(\"Creating connect graph...\")\n res = True\n\n u = InfoUI.function_orig_ea\n v = InfoUI.function_dest_ea\n\n cg = self.ba.get_connect_graph(u, v)\n res = self.ba.show_connect_graph(cg)\n\n if not res:\n self._console_output(\n \"[x] No connection between %08x and %08x\" % (u, v),\n err = True)", "def draw_di_graph(graph_object, scale_by_degree=True):\n positions = nx.spring_layout(graph_object)\n if scale_by_degree:\n d = nx.degree(graph_object)\n keys, degrees = zip(*d)\n network = nx.draw(graph_object, nodelist=keys,\n node_size=[5*degree for degree in degrees],\n pos=positions, alpha=0.5, arrows=False)\n else:\n network = nx.draw(graph_object, pos=positions, node_size=50, alpha=0.5)\n # labels = nx.draw_networkx_labels(graph, pos=positions)\n return positions, network, plt.gca()", "def plot_sql(self, table_name=None):\n\t\tlabels={}\n\t\tif table_name in self.DiG:\n\t\t\tsubgraph = self.get_subgraph(table_name)\n\t\t\tfor node in subgraph:\n\t\t\t\tlabels[node] = node\n\t\t\tprint(labels)\n\t\t\tnx.draw_spring(subgraph,labels=labels,font_size=8,node_size=300,alpha=0.7)\n\t\t\tplt.draw()\n\t\t\tplt.show()\n\t\t\treturn True\n\t\tnx.draw_spring(self.DiG,labels=labels,font_size=8,node_size=300,alpha=0.7)\n\t\tplt.draw()\n\t\tplt.show()" ]
[ "0.8590499", "0.75382435", "0.7278241", "0.71995574", "0.70474607", "0.69660336", "0.6962253", "0.68822443", "0.6876223", "0.68665344", "0.685607", "0.6749594", "0.67388505", "0.67268574", "0.6711935", "0.6700764", "0.6693562", "0.6683998", "0.6630944", "0.6603868", "0.6555654", "0.65506417", "0.6545641", "0.6501951", "0.6494064", "0.6486673", "0.6464168", "0.64488745", "0.6447923", "0.64304465", "0.6415688", "0.6403753", "0.6398126", "0.63911945", "0.6385197", "0.6384559", "0.63654834", "0.63628954", "0.63526857", "0.633873", "0.63351053", "0.63297164", "0.631047", "0.6308115", "0.6286033", "0.6284464", "0.6254339", "0.62388074", "0.62382215", "0.6229956", "0.6221604", "0.61831737", "0.6178354", "0.61769956", "0.6148595", "0.6138621", "0.610305", "0.6090716", "0.60849035", "0.6082893", "0.6081486", "0.6072982", "0.60720545", "0.6068062", "0.6040469", "0.60297054", "0.6028974", "0.602871", "0.6024641", "0.60167295", "0.60121477", "0.6009968", "0.5992382", "0.5989696", "0.5987085", "0.59797823", "0.5977288", "0.5966678", "0.5963592", "0.5956104", "0.5942831", "0.5942096", "0.5941181", "0.5928592", "0.59167486", "0.5906567", "0.5900735", "0.59002036", "0.59001243", "0.5898288", "0.5887703", "0.58873457", "0.58750564", "0.5873745", "0.58701354", "0.58605784", "0.5852359", "0.5846851", "0.58458716", "0.5844112" ]
0.7211741
3
Compute the graph diameter(s). If the graph contains several independent subgraphs, returns a list with the diameter of each of the subgraphs.
def graph_diameter(variables, relations):
    diams = []
    g = as_networkx_graph(variables, relations)
    components = (g.subgraph(c).copy() for c in nx.connected_components(g))
    for c in components:
        diams.append(nx.diameter(c))
    return diams
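A minimal usage sketch (editorial addition, not part of the dataset row): it assumes only networkx and that as_networkx_graph returns an undirected nx.Graph, so the same per-component diameter computation is shown on an invented example graph.

import networkx as nx

g = nx.Graph()
g.add_edges_from([("a", "b"), ("b", "c"), ("c", "a")])   # component 1: a triangle
g.add_edges_from([("x", "y"), ("y", "z"), ("z", "w")])   # component 2: a 4-node path

# One diameter per connected component, as in graph_diameter above.
diams = [nx.diameter(g.subgraph(c).copy()) for c in nx.connected_components(g)]
print(diams)  # e.g. [1, 3]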
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def diameter(self):\n\n v = self.vertices()\n pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]\n smallest_paths = []\n for (s,e) in pairs:\n paths = self.find_all_path(s,e)\n smallest = sorted(paths, key=len)[0]\n smallest_paths.append(smallest)\n\n smallest_paths.sort(key=len)\n\n # Print the list smallest_paths\n\n # Longest path is at the end of list\n # ie diameter corresponds to length of this path\n\n diameter = len(smallest_paths[-1]) -1\n return diameter", "def find_diameter(self):\n all_ways = []\n for vertex1 in self.graph.keys():\n for vertex2 in self.graph.keys():\n if vertex2 != vertex1:\n result = self.pathFinder(vertex1, vertex2)\n for path in result:\n all_ways.append(len(path) - 1)\n self.diameter = max(all_ways)\n print(f\"Diameter of network is {self.diameter}\")", "def topo_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n distance.append(len(pathlist[k]) - 1)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.topodiameter = Temp", "def diameter(graph):\r\n max_distance = 0\r\n for vertex in graph:\r\n new_dist = max_dist(graph, vertex)\r\n if new_dist > max_distance:\r\n max_distance = new_dist\r\n return max_distance", "def spatial_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n Temp2 = 0\n for m in range(len(pathlist[k]) - 1):\n Temp2 += self.Dismatrix[pathlist[k][m], pathlist[k][m+1]]\n distance.append(Temp2)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.diameter = Temp", "def diameter(self):\n return self._diameter", "def diameter(self):\n return self._diameter", "def undirected_diameter(self) -> int:\n return nx.diameter(self.to_undirected())", "def num_of_subgraphs(self):\n \n G = self.to_undirected_graph()\n \n count = G.num_subgraph()\n \n print('The number of disconnected components in the graph is ', count)", "def get_pupil_diameter(dlc):\r\n diameters = []\r\n # Get the x,y coordinates of the four pupil points\r\n top, bottom, left, right = [np.vstack((dlc[f'pupil_{point}_r_x'], dlc[f'pupil_{point}_r_y']))\r\n for point in ['top', 'bottom', 'left', 'right']]\r\n # First compute direct diameters\r\n diameters.append(np.linalg.norm(top - bottom, axis=0))\r\n diameters.append(np.linalg.norm(left - right, axis=0))\r\n\r\n # For non-crossing edges, estimate diameter via circle assumption\r\n for pair in [(top, left), (top, right), (bottom, left), (bottom, right)]:\r\n diameters.append(np.linalg.norm(pair[0] - pair[1], axis=0) * 2 ** 0.5)\r\n\r\n # Ignore all nan runtime warning\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\r\n return np.nanmedian(diameters, axis=0)", "def _compute_dist_cartesian(graph):\r\n for edge in graph.edges():\r\n node1, node2 = edge\r\n dx = np.abs(graph.nodes[node1]['xcoord'] - graph.nodes[node2]['xcoord'])\r\n dy = np.abs(graph.nodes[node1]['ycoord'] - graph.nodes[node2]['ycoord'])\r\n dist = np.round(np.sqrt(np.square(dx) + np.square(dy)), 5)\r\n graph.edges[node1, node2]['length'] = dist", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return self.radius * 2", "def directed_dfs(digraph, start, 
end, max_total_dist, max_dist_outdoors):\n\n\n path = [[],0 , 0]\n best_path = get_best_path(digraph, start, end, path, max_dist_outdoors, max_total_dist, best_path = None)\n\n if best_path[0] is None:\n raise ValueError('No work')\n else :\n return best_path[0]", "def diameter(self):\n return 2 * self.radius", "def total_edges_per_time_step(graphs: typ.Iterable[vtna.graph.Graph]) -> typ.List[int]:\n return [sum(edge.get_count() for edge in graph.get_edges()) for graph in graphs]", "def number_of_deviation_edges(self):\n return len(list(self.deviation_edges()))", "def getCircleDiameter(self):\n segments = []\n for (i, p1) in enumerate(self.points):\n for p2 in self.points[i+1:]:\n segments.append(Segment(p1, p2))\n s = max(segments, key=lambda s: s.length)\n return Circle(*s.middle, radius=s.length/2)", "def draw_di_graph(graph_object, scale_by_degree=True):\n positions = nx.spring_layout(graph_object)\n if scale_by_degree:\n d = nx.degree(graph_object)\n keys, degrees = zip(*d)\n network = nx.draw(graph_object, nodelist=keys,\n node_size=[5*degree for degree in degrees],\n pos=positions, alpha=0.5, arrows=False)\n else:\n network = nx.draw(graph_object, pos=positions, node_size=50, alpha=0.5)\n # labels = nx.draw_networkx_labels(graph, pos=positions)\n return positions, network, plt.gca()", "def Test_Diameter(Graph):\n\n Durchmesser = M_Graph.get_Diameter(Graph)\n KPS = float(sum(Durchmesser)) / float(len(Durchmesser))\n\n return KPS", "def directedDFS(digraph, start, end, maxTotalDist, maxDistOutdoors, path=[],shortest = None):\n #TODO\n #assumes graph is a Digraph\n #assumes start and end are nodes in graph\n def Dist(path):\n result = 0\n if path == None:\n return result\n if len(path) == 0:\n return result\n for i in range(len(path)-1):\n src = path[i]\n dest = path[i+1]\n for item in digraph.edges[src]:\n if item[0] == dest:\n result += item[1]\n return result \n \n # Helper function to calculate Total Outdoor Distance in a path\n def Out(path):\n result = 0\n if path == None:\n return result \n if len(path) == 0:\n return result\n for i in range(len(path)-1):\n src = path[i]\n dest = path[i+1]\n for item in digraph.edges[src]:\n if item[0] == dest:\n result += item[2]\n return result\n \n# Helper function using DFS method\n def DFS(graph, start, end, maxD, maxO, path = []):\n path = path + [start]\n if start == end:\n return path\n shortest = None\n distShort = maxD\n outShort = maxO\n for node in graph.childrenOf(start):\n if node not in path: #avoid cycles\n newPath = DFS(graph, node, end, maxD, maxO, path)\n distNew = Dist(newPath)\n outNew = Out(newPath)\n if newPath!= None and distNew <= maxD and outNew <= maxO:\n if not shortest or distNew < distShort: #check if shorter than shortest\n shortest = newPath\n distShort = distNew\n outShort = outNew\n return shortest\n\n result = DFS(digraph, start, end, maxTotalDist, maxDistOutdoors)\n if result == None:\n raise ValueError\n else:\n return result", "def get_diameter(self):\n\n if self.no_dist is False:\n dist = self.distance\n diam = dist * self.ang_size / 60. * np.pi/180. 
* ct._kpc_over_pc_\n self.diam = diam\n else:\n self.diam = -1 # use -1 to indicate unknown diameter\n\n return self.diam", "def number_of_direct_deviation_edges(self):\n return len(list(self.direct_deviation_edges()))", "def get_dependency_graph(self):\n return self.graph", "def in_degree_distribution (digraph) :\n\n in_degree_dist = dict ()\n in_degrees = compute_in_degrees (digraph)\n\n for node in in_degrees :\n if in_degrees[node] in in_degree_dist :\n in_degree_dist[in_degrees[node]] += 1\n else :\n in_degree_dist[in_degrees[node]] = 1\n\n return in_degree_dist", "def number_of_indirect_deviation_edges(self):\n return len(list(self.indirect_deviation_edges()))", "def get_diameter(node):\n if node is None:\n return 0\n else:\n diameter_root = get_max_path(node.left) + get_max_path(node.right) + 1\n #print 'max_path from {} is {}'.format(node.value, diameter_root)\n diameter_left = get_diameter(node.left)\n diameter_right = get_diameter(node.right)\n return max(diameter_left, diameter_right, diameter_root)", "def density(self):\n raise TypeError(\"The density function is not support on a Multigraph.\")", "def subgraphs_of_length(self, days=None, periods=None):\n graphs = []\n if days:\n sg_length = datetime.timedelta(days=days)\n else:\n sg_length = periods\n\n start_date = self.min_date\n end_date = start_date + sg_length\n done = False\n while not done:\n if start_date > self.max_date:\n break\n if end_date > self.max_date:\n # end_date = self.max_date\n done = True\n print(start_date, end_date)\n new = self.subgraph_within_dates(start_date, end_date)\n if new.nx_graph.number_of_edges():\n graphs.append(new)\n start_date += sg_length\n end_date += sg_length\n return graphs", "def graphDSD(database: str) -> int:\n\n db = _database(database)\n\n if db:\n return graph.graphDSD(database)\n\n else:\n return None", "def directedDFS(digraph, start, end, maxTotalDist, maxDistOutdoors):\n return DFShort(digraph, start, end, [], None, maxTotalDist, maxDistOutdoors)", "def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2", "def ngraphs(self):\n return len(self.__graph_list)", "def getDihedrals(self):\n try:\n return self._dihedralList\n except AttributeError:\n pass\n forceConstant=self._raw_data[\"DIHEDRAL_FORCE_CONSTANT\"]\n phase=self._raw_data[\"DIHEDRAL_PHASE\"]\n periodicity=self._raw_data[\"DIHEDRAL_PERIODICITY\"]\n dihedralPointers = self._raw_data[\"DIHEDRALS_INC_HYDROGEN\"] \\\n +self._raw_data[\"DIHEDRALS_WITHOUT_HYDROGEN\"]\n self._dihedralList=[]\n forceConstConversionFactor = (units.kilocalorie_per_mole).conversion_factor_to(units.kilojoule_per_mole)\n for ii in range(0,len(dihedralPointers),5):\n if int(dihedralPointers[ii])<0 or int(dihedralPointers[ii+1])<0:\n raise Exception(\"Found negative dihedral atom pointers %s\"\n % ((dihedralPointers[ii],\n dihedralPointers[ii+1],\n dihedralPointers[ii+2],\n dihedralPointers[ii+3]),))\n iType=int(dihedralPointers[ii+4])-1\n self._dihedralList.append((int(dihedralPointers[ii])//3,\n int(dihedralPointers[ii+1])//3,\n abs(int(dihedralPointers[ii+2]))//3,\n abs(int(dihedralPointers[ii+3]))//3,\n float(forceConstant[iType])*forceConstConversionFactor,\n float(phase[iType]),\n int(0.5+float(periodicity[iType]))))\n return self._dihedralList", "def depths(self):\n deps = []\n quads = self.getQuadrilaterals()\n groups = self._getGroupIndex()\n u_groups = np.unique(groups)\n ng = len(u_groups)\n for i in range(ng):\n q_ind = np.where(groups == u_groups[i])[0]\n nq = len(q_ind)\n top_deps = 
[]\n bot_deps = []\n for j in range(nq):\n if j == 0:\n top0 = [quads[q_ind[j]][0].depitude]\n bot0 = [quads[q_ind[j]][3].depitude]\n top_deps = top_deps + top0\n bot_deps = bot_deps + bot0\n top_deps = top_deps + [quads[q_ind[j]][1].depitude]\n bot_deps = bot_deps + [quads[q_ind[j]][2].depitude]\n deps = deps + top_deps + bot_deps[::-1] + top0 + [np.nan]\n\n return np.array(deps)", "def getSubGraphs(self):\n\n self.subGraphs = []\n visited = {}\n queue = deque()\n\n for s in self.nodes:\n\n if s not in visited:\n subGraph = SubGraph()\n self.subGraphs.append(subGraph)\n else:\n continue\n\n queue.append(s)\n\n while len (queue) > 0:\n outDegree = 0\n node = queue.popleft()\n if node in visited:\n continue\n\n for u in node.adj:\n if u not in visited:\n outDegree += 1\n queue.append(u)\n\n\n subGraph.addNode(node, outDegree)\n visited[node] = True", "def get_dependency_subgraphs(graph, node_attrib='label', edge_attrib='label'):\n assert nx.is_directed_acyclic_graph(graph)\n for n in xrange(graph.number_of_nodes()):\n for subnodes in itertools.combinations(graph.nodes(), n+1):\n subgraph_candidate = graph.subgraph(subnodes)\n if is_dependency_subgraph(graph, subgraph_candidate,\n node_attrib=node_attrib,\n edge_attrib=edge_attrib):\n yield subgraph_candidate", "def graphs(self):\n return self.__graphs", "def edge_density(self) -> float:\n return self.number_of_edges() / (\n self.number_of_nodes() * self.number_of_nodes()\n )", "def get_dfs(self, s):\n results = []\n # mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n self._dfs_recursive(s, visited, results)\n return results", "def get_dfs(self)->list:\n\t\tstack=[]\n\t\tdfs=[]\n\t\tstack.append(self)\n\t\twhile(len(stack)>0):\n\t\t\tnode=stack.pop(len(stack)-1)\n\t\t\tdfs.append(node.data)\n\t\t\tif(node.right!=None):\n\t\t\t\tstack.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tstack.append(node.left)\n\t\treturn dfs", "def calc_marginals_depdis(dep, dis, p):\n deldis = _calc_del(dis).reshape((1, 1, len(dis)))\n deldep = _calc_del(dep).reshape((1, len(dep), 1))\n p_dist = np.sum(p * deldep, axis=(0, 1))\n p_depth = np.sum(p * deldis, axis=(0, 2))\n p_depdis = np.sum(p * deldis * deldep, axis=0)\n\n # depth_mean = _get_median(dep, p_depth)\n # dist_mean = _get_median(dis, p_dist)\n depth_mean = _get_max(dep, p_depth)\n dist_mean = _get_max(dis, p_dist)\n return depth_mean, dist_mean, p_depdis, p_depth, p_dist", "def calculate_divided_differences(nodes):\n nodes_to_compute = []\n divided_differences = []\n for node in nodes:\n nodes_to_compute.append(DividedDifferenceNode(x=node[0], divided_difference=node[1]))\n\n divided_differences.append(tuple(nodes_to_compute))\n\n while len(nodes_to_compute) > 1:\n next_node_row = calculate_divided_differences_row(nodes_to_compute)\n divided_differences.append(tuple(next_node_row))\n nodes_to_compute = next_node_row\n\n return divided_differences", "def return_num_edges(self):\n return sum(map(lambda x: len(x),self.__adj))", "def deviation_edges(self, data=False):\n return self.edges_where({\"type\": \"deviation\"}, data)", "def get_diameter(self, method='volume'):\n\n if method == 'shape':\n pos = self.get_positions() - self.center\n d = 0.0\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(pos, n)\n d += r.max() - r.min()\n return d / len(self.surfaces)\n elif method == 'volume':\n V_cell = np.abs(np.linalg.det(self.lattice_basis))\n N_cell = len(self.atomic_basis)\n N = len(self)\n return 2.0 * (3.0 * N * V_cell / (4.0 * math.pi * N_cell)) 
** (1.0/3.0)\n else:\n return 0.0", "def nodes_per_time_step(graphs: typ.Iterable[vtna.graph.Graph]) -> typ.List[int]:\n return [len(set(node for edge in graph.get_edges() for node in edge.get_incident_nodes())) for graph in graphs]", "def discreteComplexDecomposeGraph(self,graph):\n s=[complex(*graph[i]) for i in range(len(graph))]\n N=len(s)\n M=self.coefficients_number\n d=0\n c=[]\n for k in range(-M//2,M//2):\n d+=sum([s[n]*cmath.exp(2j*cmath.pi*k*n/N) for n in range(N)])/N\n c.append(d)\n return c", "def size(graph, weight=None, is_directed=None):\n\n if is_directed is None:\n is_directed = graph.is_directed()\n\n graph_degree = degree(graph, weight=weight)\n graph_size = sum(graph_degree.values())\n\n if is_directed:\n return graph_size\n\n return graph_size // 2 if weight is None else graph_size / 2", "def in_degree_distribution(digraph):\n\tdist_in_degree = {}\n\tzero_in_count = 0\n\t\n\t# Returns:\n\t# { key, i.e., in-degree, number of edges coming into a node: \n\t# value, i.e., int, number of nodes with this value for in-degree }\n\n\t# first, create a temporary 2d list, each interior list containing (1) a key or in-degree and (2) a value or number of nodes with this corresponding in-degree", "def find_large_separation(self):\n\n x = self.modes['n'] # radial order\n y = self.modes['freq'] # frequency\n wid = (0.66*self.numax**0.88)/2/np.sqrt(2*np.log(2.0))\n w = (np.exp((-(y-self.numax)**2)/(2*wid**2))) # weight\n\n mN = np.sum(w)*np.sum(w*x*y) - np.sum(w*x)*np.sum(w*y)\n D = np.sum(w)*np.sum(w*x**2) - np.sum(w*x)**2\n Dn = mN/D\n #print Dn\n\n return Dn", "def dset(self):\n\n a = 0.0\n b = 0.0\n sums = np.sum(self.descriptors, axis=0)\n for sum in sums:\n if sum > 0:\n if sum == self.d_length:\n b += 1.\n else:\n a += 1.\n return a / (a+b)", "def connected_deviation_edges(self, node):\n return self._connected_edges_predicate(node, self.is_deviation_edge)", "def __DPI_helper(self, inh, dep):\r\n list_of_dpi_relation = list()\r\n if inh is None or dep is None:\r\n logger.info(\"There are no DPI relations\")\r\n else:\r\n for parent_and_child in inh:\r\n parent = parent_and_child.attrib.get(\"ci\")\r\n child = parent_and_child.attrib.get(\"cj\")\r\n for relation in dep:\r\n if relation.attrib.get(\"ci\") == parent:\r\n dpi_tuple = (parent, child, relation.attrib.get(\"cj\"))\r\n list_of_dpi_relation.append(dpi_tuple)\r\n logger.debug(\"Found DPI: (%s, %s, %s)\" % (dpi_tuple[0], dpi_tuple[1], dpi_tuple[2]))\r\n list_of_dpi_relation = list(dict.fromkeys(list_of_dpi_relation))\r\n return list_of_dpi_relation", "def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs", "def connected_components(self) -> List[list]:\n for n in self.dw_graph.get_all_v().values():\n n.distance=0.0\n mega_list = []\n for n in self.dw_graph.get_all_v().values():\n if n.distance!=-10:\n mega_list.append(self.connected_component(n.node_id))\n return mega_list", "def _get_distances(self):\n for molecule in self.values():\n molecule.get_distances()\n\n # for atom in self.atoms:\n # atom.get_distances()", "def getNodesAndDistances():\n\n\tglobal width, height\n\n\t# First we generate the list\n\n\tprint \"\\tGetting node list...\"\n\t\n\tnodeDict = {}\n\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\ttheType = getSquare(x, y)\n\n\t\t\tprint 
\"\\t\\tGetting list for node (%d, %d) of type %d...\" % (x, y, theType)\n\n\t\t\ttempList = getNodeList(x, y, theType)\n\n\t\t\tif tempList == []:\n\t\t\t\tprint \"\\t\\t\\tNo nodes here.\"\n\t\t\telse:\n\t\t\t\tfor i in range(len(tempList)):\n\t\t\t\t\tnode = tempList[i]\n\t\t\t\t\tnodeName = node[0]\n\t\t\t\t\tnodeDict[nodeName] = node[1:]\t# Everything but the first element\n\t\t\t\t\tprint \"\\t\\t\\tAdded node '%s'...\" % nodeName\n\n\tprint \"\\tDone getting node list (%d nodes)...\" % (len(nodeDict.keys()))\n\tprint \"\"\n\n\t# Now that we've got that, we get a list of pairs\n\n\tpairList = getPairList(nodeDict)\n\n\t# Now we calculate the distance between every pair of nodes that connect\n\n\tprint \"\"\n\tprint \"\\tCreateing dictionary of distances between connected nodes...\"\n\n\tdistanceDict = {}\n\n\tfor tuple in pairList:\n\t\t(nodeA, nodeB) = tuple\n\t\tprint \"\\t\\tCalculating distance between '%s' and '%s'...\" % (nodeA, nodeB)\n\t\tdistance = distanceBetween(nodeA, nodeB, nodeDict)\n\t\tpairName = \"%s%s\" % (nodeA, nodeB)\n\t\tdistanceDict[pairName] = distance\n\t\tprint \"\\t\\t\\tDistace was %f.\" % (distance)\n\n\tprint \"\\tDone creating dictionary of node differences (%d pairs).\" % (len(distanceDict.keys()))\n\n\treturn nodeDict, distanceDict", "def linkDensity(self, time=None):\r\n listofDensities = list()\r\n for cell in self.cells:\r\n listofDensities.append(cell.cellDensity())\r\n return listofDensities", "def GetDivisions(self):\n ...", "def compute_in_degrees(digraph):\n num_degree = {}\n for dummy_node in digraph:\n num_degree[dummy_node] = 0\n for key in digraph:\n for node in digraph[key]:\n num_degree[node] += 1\n return num_degree", "def density(self) -> float:\n if self.is_directed():\n factor = 1\n else:\n factor = 2\n\n num_e = self._Impl.number_of_edges(directed_edges=True)\n num_v = self._Impl.number_of_vertices()\n\n density = (factor * num_e) / (num_v * (num_v - 1))\n return density", "def getDegrees(self):\n l = []\n for node in self.getNodes():\n l.append((node, len(self.graph[node])))\n\n return l", "def getDimensions():", "def get_number_of_spectra(graph):\n return sum([graph.node[c][\"size\"] for c in graph.nodes])", "def in_degree_distribution(digraph):\n in_degrees = compute_in_degrees(digraph)\n in_deg_dist = {}\n for key in in_degrees.keys():\n if in_deg_dist.has_key(in_degrees[key]):\n in_deg_dist[in_degrees[key]] += 1\n else:\n in_deg_dist[in_degrees[key]] = 1\n return in_deg_dist", "def calculate_divided_differences_row(nodes_to_compute):\n divided_differences = []\n\n if len(nodes_to_compute) == 1:\n return None\n\n for i in range(0, len(nodes_to_compute) - 1):\n child = DividedDifferenceNode.create_child_node(nodes_to_compute[i], nodes_to_compute[i + 1])\n child.calculate_value()\n divided_differences.append(child)\n\n for node in divided_differences:\n print(node, end='')\n\n print('\\n')\n return divided_differences", "def _get_nodes_and_edges(dag: DAGNode):\n\n edges = []\n nodes = []\n\n def _dfs(node):\n nodes.append(node)\n for child_node in node._get_all_child_nodes():\n edges.append((child_node, node))\n return node\n\n dag.apply_recursive(_dfs)\n return nodes, edges", "def compute_in_degrees (digraph) :\n in_degree = dict()\n\n # initialize the in-degree of each node with 0s\n for key in digraph :\n in_degree[key] = 0\n\n for node in digraph :\n for head_node in digraph[node] :\n in_degree[head_node]+=1\n\n return in_degree", "def path_length(graph, node_names):\n\n total = 0\n for i in range(0, len(node_names) - 1):\n 
total += graph.get_edge(node_names[i], node_names[i + 1]).length\n\n return total", "def make_size_list(self) -> list[int]:\n content_size = sum(self.content.values())\n child_lists = [child.make_size_list() for child in self.children.values()]\n child_sizes = sum([child[-1] for child in child_lists])\n total_size = content_size + child_sizes\n result_list = list(itertools.chain.from_iterable(child_lists)) + [total_size]\n return result_list", "def query_size(gal):\n try:\n result_table = Ned.query_object(gal)\n except:\n return 0 * u.arcsec\n ndiameters = result_table[\"Diameter Points\"]\n if ndiameters > 0:\n diam = Ned.get_table(gal, table='diameters')\n try:\n units = [_ for _ in diam[\"Major Axis Unit\"].data]\n except:\n units = [_.decode() for _ in diam[\"Major Axis Unit\"].data]\n idx = [i for i, un in enumerate(units) if un in\n [\"arcmin\", \"arcsec\", \"degree\"]]\n if len(idx) > 0:\n diam = diam[idx]\n sizes = [d[\"Major Axis\"] * u.Unit(d[\"Major Axis Unit\"]) for d in\n diam]\n sizes = np.array([s.to(\"arcsec\").value for s in sizes])\n return np.nanmax(sizes) * u.arcsec\n return 0 * u.arcsec", "def get_sides(vertices):\n return [dist(vertices[1], vertices[2]),\n dist(vertices[2], vertices[0]),\n dist(vertices[0], vertices[1])]", "def node_size(graph):\n adj = nx.betweenness_centrality(graph)\n return np.array([x * 1e3 for x in adj.values()])", "def get_dilations(onnx_node): # type: (NodeWrapper) -> Tuple[int, int, int]\n dil_h, dil_w, dil_d = 1, 1, 1 # default values\n dilations = onnx_node.get_attribute_value('dilations', ()) # dilation along each filter axis\n\n if len(dilations) == 2: # ONNX input axes order NCHW\n dil_h, dil_w = dilations\n elif len(dilations) == 3: # ONNX input axes order NCHWD\n dil_h, dil_w, dil_d = dilations\n\n return dil_h, dil_w, dil_d", "def _get_diameter(self,filename,maxLen=3):\n filename = os.path.splitext(filename)[0] \n filename = os.path.split(filename)[1] \n filename = filename.split(\"_\",3)[2] \n diameter = filename \n return diameter", "def connected_components(graph):\n graphCopy = graph.copy()\n edges = graph.edges(data=True)\n edgeCapacity = 1.0 * np.array([property['capa'] for node1, node2, property in edges])\n percentile = np.percentile(edgeCapacity, 50.0)\n for node1, node2, property in edges:\n if property['capa'] <= percentile:\n graphCopy.remove_edge(node1, node2)\n connectedComponents = nx.connected_components(graphCopy)\n connectedComponentSizes = np.array([len(component) for component in connectedComponents])\n return(connectedComponentSizes)", "def get_dependency_graph(self, zero_index=False):\n directed_graph = nx.DiGraph()\n construction_objects = self.steps\n if zero_index:\n labels = range(len(construction_objects))\n else:\n labels = range(1, len(construction_objects) + 1)\n object_labels = dict(map(lambda x, y: (x, y), construction_objects, labels))\n\n directed_graph.add_nodes_from(object_labels.values())\n for shape in construction_objects:\n for dependency in shape.dependencies:\n directed_graph.add_edge(object_labels[dependency], object_labels[shape])\n\n return object_labels, directed_graph", "def in_degree_distribution(digraph):\n degree_distr = {}\n num_degree = compute_in_degrees(digraph)\n for node in num_degree:\n degree_distr[num_degree[node]] = degree_distr.get(num_degree[node],0) + 1\n return degree_distr", "def get_subgraph(graph, min_degree):\n ###TODO\n pass", "def in_degree_distribution(digraph):\n #return dict(collections.Counter((compute_in_degrees(digraph)).values()))\n idd = {}\n in_degrees = 
(compute_in_degrees(digraph)).values()\n for degree in in_degrees:\n if degree in idd:\n pass\n else:\n idd[degree] = in_degrees.count(degree)\n return idd", "def line_graph_forbidden_subgraphs():\n from sage.graphs.all import Graph\n from sage.graphs.generators.basic import ClawGraph\n graphs = [ClawGraph()]\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 2: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2, 3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3, 4]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 5: [2, 0, 1]\n }))\n\n graphs.append(Graph({\n 5: [0, 1, 2, 3, 4],\n 0: [1, 4],\n 2: [1, 3],\n 3: [4]\n }))\n\n graphs.append(Graph({\n 1: [0, 2, 3, 4],\n 3: [0, 4],\n 2: [4, 5],\n 4: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3, 4],\n 2: [3, 4],\n 3: [4]\n }))\n\n return graphs", "def compute_in_degrees(digraph):\n\t# print \"digraph:\", digraph\n\n\tin_degrees = {}\n\n\tfor node in digraph:\n\t\tin_degrees[node] = 0\n\n\tfor node in digraph:\n\t\t# print \"node:\", node\n\t\tfor element in digraph[node]:\n\t\t\t# print \"element:\", element\n\t\t\tif element in digraph:\n\t\t\t\t# print \"element in digraph:\", element, digraph, element in digraph\n\t\t\t\tin_degrees[element] += 1\n\n\t# print \"in_degrees:\", in_degrees\n\treturn in_degrees", "def getDivisors(n):", "def dijkstras(graph_list, start):\n # for returning\n shortest_dist = dict()\n shortest_parent = dict()\n\n # create an adjacency list from the graph definition, and a dict for keeping track of parents\n graph = {} # our adjacency list dict\n parent = {} # our parents dict\n\n for fr, to, w in graph_list:\n if fr not in graph:\n graph[fr] = [(to, w)]\n else:\n graph[fr].append((to, w))\n\n if fr not in parent:\n parent[fr] = None\n if to not in parent:\n parent[to] = None\n\n # in this implementation of dijkstras, we keep track of a list of visited nodes in order to use a heap as-is,\n # without having to modify the heap's weights\n visited = set()\n heap = [(0, start, None)] # distance, node, parent\n\n while heap:\n curr_dist, node, par = heapq.heappop(heap)\n\n # only visit this node if it's not visited yet. 
The first time we visit a node, because of the heap, it's\n # guaranteed that it's the shortest path\n if node not in visited:\n # visit this node\n visited.add(node)\n\n # update shortest dict and parent dict\n shortest_dist[node] = curr_dist\n parent[node] = par\n\n # we won't visit this node again, so compute the parent list here\n curr_parent_list = []\n pnode = node\n # work out shortest parent\n while pnode:\n curr_parent_list.append(pnode)\n pnode = parent[pnode]\n shortest_parent[node] = curr_parent_list[::-1]\n\n # add adjacent nodes if they haven't been visited\n if node in graph:\n for to, w in graph[node]:\n if to not in visited:\n heapq.heappush(heap, (curr_dist + w, to, node))\n\n print(shortest_dist)\n print(shortest_parent)", "def in_degree_distribution(digraph):\n # find in_degrees\n in_degree = compute_in_degrees(digraph)\n # initialize dictionary for degree distribution\n degree_distribution = {}\n # consider each vertex\n for vertex in in_degree:\n # update degree_distribution\n if in_degree[vertex] in degree_distribution:\n degree_distribution[in_degree[vertex]] += 1\n else:\n degree_distribution[in_degree[vertex]] = 1\n return degree_distribution", "def find_fractions():\n num_list = []\n den_list = []\n for n in range(10, 100):\n for d in range(10, 100):\n if d > n:\n x = n / d\n ln = list(str(n))\n ld = list(str(d))\n if (ln[0] == ld[1]) and (ln[0] != '0'):\n if ld[0] != '0':\n if (int(ln[1]) / int(ld[0])) == x:\n print \"n/d =\", n, d\n num_list.append(n)\n den_list.append(d)\n else:\n continue\n elif (ln[1] == ld[0]) and (ln[1] != '0'):\n if ld[1] != '0':\n if (int(ln[0]) / int(ld[1])) == x:\n print \"n/d =\", n, d\n num_list.append(n)\n den_list.append(d)\n else:\n continue\n else:\n continue\n return num_list, den_list", "def getPathLength(self, starlist):\n sum = 0\n for i in range(len(starlist)-1):\n sum += self.getDistance(starlist[i],starlist[i+1])\n return sum", "def cyclomaticComplexity (self):\n self.tarjan()\n return len(self.__edges) - len(self.__nodes) + 2 * len(self.__scc)", "def estimate_diameter(self, return_indices=False):\n greatest_diameter = -np.inf\n i,j,k = 0,0,1 # placeholders for max indices\n for c,contour in enumerate(self.contours):\n contour_array = contour.to_matrix()[:,:2]*self.scan.pixel_spacing\n # There's some edge cases where the contour consists only of \n # a single point, which we must ignore.\n if contour_array.shape[0]==1: continue\n \n # pdist computes the pairwise distances between the points.\n # squareform turns the condensed array into matrix where\n # entry i,j is ||point_i - point_j||.\n diameters = squareform(pdist(contour_array))\n diameter = diameters.max()\n\n if diameter > greatest_diameter:\n greatest_diameter = diameter\n i = c\n j,k = np.unravel_index(diameters.argmax(), diameters.shape)\n\n if not return_indices:\n return greatest_diameter\n else:\n return greatest_diameter, (i,j,k)", "def dimensions():", "def strongly_connected_component_subgraphs(G):\n cc=strongly_connected_components(G)\n graph_list=[]\n for c in cc:\n graph_list.append(G.subgraph(c))\n return graph_list", "def in_degree_distribution(digraph):\n in_degrees = compute_in_degrees(digraph)\n distribution = {}\n for dummy_key in in_degrees.keys():\n if distribution.has_key(in_degrees[dummy_key]) == False:\n distribution[in_degrees[dummy_key]] = 1\n else:\n distribution[in_degrees[dummy_key]] += 1\n# for dummy_key in in_degree_distribution.keys():\n# in_degree_distribution[dummy_key] = (float)(in_degree_distribution[dummy_key]) / len(digraph)\n \n 
return distribution", "def get_dist_from_nearest_ndd(digraph, ndds):\n \n # Get a set of donor-patient pairs who are the target of an edge from an NDD\n ndd_targets = set()\n for ndd in ndds:\n for edge in ndd.edges:\n ndd_targets.add(edge.tgt)\n\n # Breadth-first search\n q = deque(ndd_targets)\n distances = [999999999] * len(digraph.vs)\n for v in ndd_targets:\n distances[v.id] = 1\n\n while q:\n v = q.popleft()\n for e in v.edges:\n w = e.tgt\n if distances[w.id] == 999999999:\n distances[w.id] = distances[v.id] + 1\n q.append(w)\n\n return distances", "def calculate_d_vals(self) -> None:\n # Skip last point if path is non-cyclic\n point_inds = range(self.num_points) if self.is_cyclic else range(self.num_points - 1)\n for i in point_inds:\n z_i = self.points[i % self.num_points]\n z_j = self.points[(i + 1) % self.num_points]\n z_i.d_val = abs(z_i - z_j)", "def out_degree_average(digraph):\n count = 0\n for key in digraph.keys():\n count += len(digraph[key])\n return float(count)/len(digraph)", "def get_cidr_graphs_connection(self):\n return self.m_connection.cidr_graphs", "def depth_setsM(self):\n G = self.copy()\n depths = []\n while G.vertices() != []:\n outdegs = G.out_degree(labels=True)\n new_depth = [x for x in outdegs if outdegs[x]==0]\n depths.append(new_depth)\n G.delete_vertices(new_depth)\n return depths", "def order_dfs(self) -> List[Nodo]:\n # Nodos por buscar, es una pila\n pending: List[Nodo] = [self.root]\n # Nodos ya visitados\n visited: List[Nodo] = []\n\n # Mientras la pila tenga items\n while len(pending) > 0:\n # Procesar el primer elemento\n curr = pending.pop()\n visited.append(curr)\n\n # Agregar los hijos no visitados del nodo a la pila\n for child in reversed(curr.children):\n if child in visited:\n continue\n pending.append(child)\n\n return visited", "def internal_link_density(self, node_list):\n N = len(node_list)\n n_links = self.number_internal_links(node_list)\n if self.directed:\n return float(n_links) / (N * (N - 1))\n else:\n return 2 * float(n_links) / (N * (N - 1))" ]
[ "0.700528", "0.64061725", "0.6378661", "0.6351375", "0.6088722", "0.5843668", "0.5843668", "0.5829243", "0.57851386", "0.56062824", "0.55966824", "0.55673695", "0.55673695", "0.55229545", "0.5520967", "0.5497087", "0.54773235", "0.54561347", "0.5399723", "0.5395755", "0.5389739", "0.53170437", "0.5293705", "0.5268244", "0.5255574", "0.5241383", "0.52383834", "0.5229378", "0.52201384", "0.5211598", "0.51787454", "0.5161818", "0.51598054", "0.5159263", "0.5155518", "0.51312554", "0.51209897", "0.5119176", "0.5096829", "0.50944805", "0.5086151", "0.5081479", "0.50796497", "0.50790644", "0.50766844", "0.50742626", "0.50656587", "0.5064932", "0.5050073", "0.50389236", "0.5038298", "0.5032917", "0.502582", "0.49786773", "0.49721634", "0.49555457", "0.49550393", "0.49458474", "0.4918463", "0.4916049", "0.49097303", "0.49081093", "0.4906875", "0.4906572", "0.488694", "0.48665857", "0.4864158", "0.48341", "0.48211715", "0.4817592", "0.48150346", "0.4814228", "0.4810577", "0.48084703", "0.47991887", "0.4789407", "0.47839898", "0.478377", "0.47827795", "0.47805944", "0.4779145", "0.47775003", "0.47742268", "0.47732908", "0.4772575", "0.476996", "0.47523507", "0.4752235", "0.4739822", "0.47372788", "0.4734726", "0.47296843", "0.47186428", "0.47083813", "0.4706716", "0.46997982", "0.4699325", "0.46985248", "0.46945506", "0.4692274" ]
0.7837539
0
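The negative documents listed in the record above are, for the most part, different implementations of graph diameter (the longest shortest path between any two vertices). As context only, here is a minimal sketch of that computation, assuming a small unweighted, undirected graph stored as an adjacency dict; the graph literal and helper names below are illustrative assumptions, not taken from the dataset.

from collections import deque

def bfs_eccentricity(graph, source):
    # Shortest-path distance (in edges) from source to every reachable node.
    dist = {source: 0}
    queue = deque([source])
    while queue:
        node = queue.popleft()
        for neighbour in graph[node]:
            if neighbour not in dist:
                dist[neighbour] = dist[node] + 1
                queue.append(neighbour)
    # Eccentricity of source = distance to the farthest reachable node.
    return max(dist.values())

def diameter(graph):
    # Diameter = maximum eccentricity over all vertices (connected graph assumed).
    return max(bfs_eccentricity(graph, v) for v in graph)

# Illustrative adjacency dict: the path a-b-c-d has diameter 3.
example = {"a": ["b"], "b": ["a", "c"], "c": ["b", "d"], "d": ["c"]}
assert diameter(example) == 3

Running a breadth-first search from every vertex is cheaper than enumerating every path between every pair, which is the strategy several of the negatives above take.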
Generate all possible pairs from the list of given elements.
def all_pairs(elements): if len(elements) < 2: return [] elif len(elements) == 2: return [(elements[0], elements[1])] else: new_pairs = [] for elt in elements[1:]: new_pairs.append((elements[0], elt)) return all_pairs(elements[1:]) + new_pairs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pairs(lst):\r\n\tfor i in range(1, len(lst), 2):\r\n\t\tyield lst[i-1], lst[i]", "def all_pairs(items, sort=False):\n if sort:\n items = sorted(items)\n for i, ni in enumerate(items):\n for j, nj in enumerate(items):\n if j > i: yield ni, nj", "def __unordered_pairs(l):\n\n return [(l[i], l[j]) for i in range(len(l) - 1) for j in range(i + 1, len(l))]", "def list_to_pairs(l):\n return {(l[2*i], l[2*i+1]) for i in range(len(l)/2)}", "def split_in_pairs(arg: Iterable) -> Iterable[Tuple]:\n # We are using zip_longest with one clever hack:\n # https://docs.python.org/3/library/itertools.html#itertools.zip_longest\n # We create an iterator out of the list and then pass the same iterator to\n # the function two times. Thus the function consumes a different element\n # from the iterator each time and produces the desired result.\n iterator = iter(arg)\n return zip_longest(iterator, iterator)", "def get_pairs(terms):\n return itertools.combinations(terms, 2)", "def pairwise(lst):\r\n if not lst: return\r\n\r\n for i in range(len(lst)-1):\r\n yield lst[i], lst[i+1]\r\n yield lst[-1], None", "def pairs(lst):\n i = iter(lst)\n prev = next(i)\n for item in i:\n yield prev, item\n prev = item", "def pair_combos(iterable):\n pairs = set()\n for a in iterable:\n for b in iterable:\n pairs.add(a + b)\n return list(pairs)", "def triplewise(iterable):\n # triplewise('ABCDEFG') -> ABC BCD CDE DEF EFG\n for (a, _), (b, c) in pairwise(pairwise(iterable)):\n yield a, b, c", "def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)", "def get_pairs(my_list):\n return [(current, my_list[idx + 1] if - 1 else None) for idx, current in enumerate(my_list) if idx < len(my_list) - 1]", "def stagger_tuple(elements_list, initial=None):\n res = []\n previous_element = initial\n for element in elements_list:\n if previous_element is not None:\n res.append((previous_element, element))\n previous_element = element\n return res", "def make_pairs(sequence):\n length = len(sequence)\n return [\n (sequence[i], sequence[i + 1])\n for i in range(length - 1)\n ]", "def get_all_possible_os_pairings(indices_list):\n pairs = []\n itr = 0\n\n for links in indices_list:\n\n for item in links:\n for i in range(itr,len(links)):\n\n if item == links[i]:\n continue\n else:\n pair = item, links[i]\n pairs.append(pair)\n return pairs", "def ring_pairs(v):\n for i in range(len(v)):\n a = v[i]\n b = v[ (i+1) % len(v) ]\n yield (a,b)", "def generating_pairs(self, players_list) -> list[tuple[Player]]:\n apairing_players = []\n already_paired = []\n id_number = 0\n times_number_loop = 0\n breaks_number = 0\n while len(apairing_players) != 4:\n\n times_number_loop += 1\n if id_number == 8:\n id_number = 0\n pair = self.create_pair(players_list, id_number, already_paired)\n if pair is None:\n id_number += 1\n else:\n already_paired.append(pair[0])\n already_paired.append(pair[1])\n apairing_players.append(pair)\n id_number += 1\n if times_number_loop == 50:\n already_paired, apairing_players = self.break_pair(already_paired, apairing_players, breaks_number)\n times_number_loop = 0\n breaks_number += 1\n\n return apairing_players", "def getallpairs(self, x):\n result = []\n for u in range(len(x) - 1):\n result.extend([x[u] + a for a in x[u+1:]])\n\n return result", "def generate_pairs(self, _list_d):\n\n length = len(_list_d)\n result_list = {}\n\n for i in range(length):\n for j in xrange(i+1,length):\n l = len(result_list)\n result_list[l] = ((i, _list_d[i]),(j, _list_d[j]))\n\n return result_list", "def make_tag_pairs(self, 
input, start, end, elements):\n tps = TagPairs()\n for e in elements:\n k = [k for k in e.keys()][0]\n tps[k] = e[k]\n return tps", "def pairwise(iterable):\n a = iter(iterable)\n return zip(a, a, a)", "def get_list_of_all_pairs_lists(input_lst):\n # handle possible case of empty list input\n if len(input_lst) == 0:\n return [[]]\n\n # base case - if list is two items long\n elif len(input_lst) == 2:\n return [[(input_lst[0], input_lst[1])]]\n\n else:\n combos = []\n first_item = input_lst[0] # first item in list\n\n # look at all items after first item - pair each with first item\n for i in range(1, len(input_lst)):\n\n pair = (first_item, input_lst[i])\n\n other_items_list = input_lst[1:i] + input_lst[i + 1 :]\n\n for rest in get_list_of_all_pairs_lists(other_items_list):\n\n combos.append([pair] + rest)\n\n return combos", "def tripletGenerator(S):\n for a in S:\n for b in S:\n for c in S:\n yield (a, b, c)", "def combinations(*args: List[Any]) -> List[List]:\n return list([list(el) for el in list(product(*args))])", "def pairwise(iterable):\n a = iter(iterable)\n return zip(a, a)", "def pairwise(s: List[Any]) -> Iterator[Tuple[Any, Any]]:\n\n a, b = itertools.tee(s)\n next(b, None)\n return zip(a, b)", "def pairs(iterable):\n previous = None\n for item in iterable:\n current = item\n if previous is not None:\n yield previous, current\n previous = current", "def generate_pairs(number: int) -> List[List[int]]:\n return [\n [top, inner]\n for top in range(number + 1)\n for inner in range(top, number + 1)\n ]", "def generate_pairs_of_words(word_list):\n def pair_words(word_list, i, j, connector):\n return word_list[i] + connector + word_list[j]\n pairs = []\n n = len(word_list)\n for i in range(n-1):\n for j in range(i+1, n):\n pairs.append(pair_words(word_list, i, j, ' '))\n pairs.append(pair_words(word_list, j, i, ' '))\n pairs.append(pair_words(word_list, i, j, '-'))\n pairs.append(pair_words(word_list, j, i, '-'))\n pairs.append(pair_words(word_list, i, j, '_'))\n pairs.append(pair_words(word_list, j, i, '_'))\n pairs.append(pair_words(word_list, i, j, ''))\n pairs.append(pair_words(word_list, j, i, ''))\n outputs = list(set(pairs)) # remove duplicates\n return outputs", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return list(zip(a, b))", "def pairs_upto(n):\n return ((a, b)\n for a in range(1, n)\n for b in range(1, n)\n if a <= b)", "def palindromePairs(lst):\n results = []\n for i, e1 in enumerate(lst):\n for j, e2 in enumerate(lst):\n if i != j:\n if isPalindrome(e1+e2):\n results.append((i, j))\n return results", "def pairwise(iterable):\r\n a = iter(iterable)\r\n return izip(a, a)", "def _permute_generetably(elements: List[T], i: int):\n if i == 1:\n yield elements\n else:\n for j in range(i - 1):\n for permutation in _permute_generetably(elements, i - 1):\n yield permutation\n\n k = 0 if i % 2 == 1 else j\n\n elements[k], elements[i - 1] = elements[i - 1], elements[k]\n\n for permutation in _permute_generetably(elements, i - 1):\n yield permutation", "def pick_pairs(amount):\n return [(i,i+1,2) for i in range(0, amount, 2)]", "def pairwise(iterable: Iterable[Any]) -> Iterable[Any]:\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs", "def pairwise(s):\n return [(s[i - 1], s[i]) for i in range(1, len(s))]", "def 
pairs_from_list(lights):\n length = len(lights)\n half = int(length / 2)\n offset = 0\n\n centre = None\n if length % 2 == 1:\n centre = lights[half]\n offset = 1\n\n left = lights[:half]\n\n rh_start = half + offset\n right = reversed(lights[rh_start:])\n\n pairs = list(map(list, zip(left, right)))\n\n if centre:\n pairs.append([centre])\n\n return pairs", "def pair(first, second):\n return [first, second]", "def pairs(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None):\n return self._iter(txn, args, lo, hi, reverse, max, include)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def symbolize_pairs(list_of_pair_string: str) -> list:\n symbolized_pairs = []\n for pair in list_of_pair_string:\n symbolized_pairs.append(pair[0] + '-' + pair[1])\n\n return symbolized_pairs", "def __get_all_combinations(self, list_of_items):\r\n return [itertools.combinations(list_of_items, index+1)\r\n for index in range(len(list_of_items))]", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable: Iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def adjacent_pairs(self, nodes: Tuple[int], k: int) -> List[Tuple[int, int]]:\n n = len(nodes)\n return [(u, nodes[j % n])\n for i, u in enumerate(nodes)\n for j in range(i + 1, i + 1 + k // 2)]", "def CombinationMethods(nums, elements_number):\n res = list(c(nums, elements_number))\n return res, Combination(len(nums), elements_number)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n\n return zip(a, b)", "def all_perms(elements):\n if len(elements) <=1:\n yield elements\n else:\n for perm in all_perms(elements[1:]):\n for i in range(len(elements)):\n # nb elements[0:1] works in both string and list contexts\n yield perm[:i] + elements[0:1] + perm[i:]", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return itertools.zip_longest(a, b)", "def combine_params(param_list):\n\n\tif sum(isinstance(l, list) for l in param_list) > 1:\n\t\treturn list(map(list, list(itertools.product(*param_list))))\n\telse:\n\t\treturn [[p] for p in param_list]", "def permutations(elements, transform: Transform = None):\n permutations = []\n size = len(elements)\n actions = [-1, -1]\n\n _permute_iterably2(elements, permutations, size, actions, transform)\n return permutations", "def get_all_pairs(G):\n # list all (start,dest) pairs between which the route must be computed\n pairs_list = [(start, dest) for dest in G.nodes for start in G.nodes]\n\n # shuffle all elements in-place\n random.shuffle(pairs_list)\n\n # generate a set from the list\n pairs_set = set(pairs_list)\n\n return pairs_list, pairs_set", "def pairwise(iterable):\n previous, current = None, None\n \n for current in iterable:\n if previous:\n yield previous, current\n previous = current\n if current:\n yield current, None", "def pairwise(iterable):\n # copied from itertools docs\n from itertools import 
tee\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def make_pairs(txt):\r\n \r\n lista = []\r\n string = \"\" \r\n count = 0\r\n \r\n if len(txt)%2 != 0 :\r\n count = 1\r\n \r\n for i in range(len(txt)):\r\n \r\n string += txt[i]\r\n count += 1\r\n \r\n if count == 2:\r\n lista.append(string)\r\n string = \"\"\r\n count = 0\r\n \r\n return lista", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return izip(a, b)", "def addpairs(params, cur_args, to_list):\n if params.null() and cur_args.null():\n return to_list\n if params.atom() or cur_args.atom():\n raise error.LispException(\"pairs cannot be atoms\")\n pair = SExp(params.car(), cur_args.car())\n return addpairs(params.cdr(), cur_args.cdr(), SExp(pair, to_list))", "def get_pairs(N, row, col):\n pairs = np.array(list(combinations(range(N), 2)))\n pairs = np.column_stack((pairs, np.zeros(len(pairs), dtype=int)))\n # fill in edges\n for (r, c) in zip(row, col):\n k = r * (2 * N - r - 1) / 2 - r + c - 1\n pairs[int(k), 2] = 1\n\n return pairs", "def gen_4_tuples(it):\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))", "def gen_4_tuples(it):\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))", "def permute(lst):\n tups = []\n tup = []\n if len(lst) > 1:\n tup = [(lst[i],lst[j]) for i in range(len(lst)) for j in range(i+1, len(lst))]\n tups.extend(tup)\n return tups", "def generate_pairs(self, all_walks):\n logging.info(['edge_types before generate pairs', self.edge_types])\n\n pairs = []\n skip_window = self.config['win_size'] // 2\n for layer_id, e_type in enumerate(self.edge_types):\n walks = all_walks[e_type]\n for walk in tqdm.tqdm(walks):\n for i in range(len(walk)):\n for j in range(1, skip_window + 1):\n if i - j >= 0 and walk[i] != walk[i - j]:\n neg_nodes = self.graph[e_type].sample_nodes(\n self.config['neg_num'])\n pairs.append(\n (walk[i], walk[i - j], *neg_nodes, layer_id))\n if i + j < len(walk) and walk[i] != walk[i + j]:\n neg_nodes = self.graph[e_type].sample_nodes(\n self.config['neg_num'])\n pairs.append(\n (walk[i], walk[i + j], *neg_nodes, layer_id))\n return pairs", "def triples(self):\n\n if len(self.words) < 3:\n return\n\n for i in range(len(self.words) - 2):\n yield (self.words[i], self.words[i+1], self.words[i+2])", "def merge_pairs(list, should_merge, merge):\n ret = []\n i = 0\n while i < len(list) - 1:\n a = list[i]\n b = list[i + 1]\n if should_merge(a, b):\n ret.append(merge(a, b))\n i += 2\n else:\n ret.append(a)\n i += 1\n if i == len(list) - 1:\n ret.append(list[i])\n return ret", "def _iter_pairs(graph):\n for u, v in set(graph.edges_iter()):\n yield u, v", "def get_pair(self):\n \n if isinstance(self._elements, (float, int)) and isinstance(self._membership_degrees, (float, int)):\n return list(zip(list([self._elements]), list([self._membership_degrees])))\n else:\n return list(zip(self._elements, self._membership_degrees))", "def find_pairs(words): \n pass", "def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])", "def pairwise(ys):\n seq_length = len(ys)\n results = []\n for k, v in enumerate(ys):\n if k+1 == seq_length:\n break\n first = list(ys[0:k])\n pair = (ys[k], ys[k+1])\n rest = list(ys[k+2:])\n temp = first + [pair] + rest\n results.append(temp)\n return results", "def permutations(xs):\n if not xs:\n yield []\n else:\n for x, xs in selections(xs):\n for ys in permutations(xs):\n yield [x] + ys", "def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:\n all_selected_combinations: 
list[tuple[str, str]] = []\n for i in range(max(len(list_1), len(list_2))):\n all_selected_combinations.append((list_1[i % len(list_1)], list_2[i % len(list_2)]))\n return all_selected_combinations", "def permutations(iterable):\n pass", "def pairwise(iter):\n from itertools import tee, izip\n it, it_next = tee(iter)\n next(it_next)\n for first, second in izip(it, it_next):\n yield first, second", "def make_pairs(innames):\n names = copy.deepcopy(innames)\n pairs = []\n if len(names) % 2 == 1:\n m = random.randint(0, len(names) - 1)\n singleton = names[m]\n del names[m]\n else:\n singleton = None\n while len(names) > 0:\n i = 0\n j = 0\n while i == j:\n i = random.randint(0, len(names) - 1)\n j = random.randint(0, len(names) - 1)\n # print(\"i is\", i, \"j is\", j)\n k = names[i]\n l = names[j]\n # print(\"k is\", k, \"l is\", l)\n if i > j:\n del names[i]\n del names[j]\n else:\n del names[j]\n del names[i]\n # print(\"names is\", repr(names))\n if singleton is None:\n pairs.append(set([k, l]))\n else:\n pairs.append(set([k, l, singleton]))\n singleton = None\n return pairs", "def compute_combinations(items: List[Union[List[Any], Tuple]], n: int) -> List[List[Any]]:\n return [chunks[i:i + n] for chunks in items for i in range(len(chunks) - (n - 1))]", "def generate_pairs_lists(\n top, molecule=None, sort_key=None, refer_from_scaling_factor=False\n):\n from gmso.external import to_networkx\n from gmso.parameterization.molecule_utils import (\n molecule_angles,\n molecule_bonds,\n molecule_dihedrals,\n )\n\n nb_scalings, coulombic_scalings = top.scaling_factors\n\n if sort_key is None:\n sort_key = top.get_index\n\n graph = to_networkx(top, parse_angles=False, parse_dihedrals=False)\n\n pairs_dict = dict()\n if refer_from_scaling_factor:\n for i in range(3):\n if nb_scalings[i] or coulombic_scalings[i]:\n pairs_dict[f\"pairs1{i+2}\"] = list()\n else:\n for i in range(3):\n pairs_dict = {f\"pairs1{i+2}\": list() for i in range(3)}\n\n if molecule is None:\n bonds, angles, dihedrals = top.bonds, top.angles, top.dihedrals\n else:\n bonds = molecule_bonds(top, molecule)\n angles = molecule_angles(top, molecule)\n dihedrals = molecule_dihedrals(top, molecule)\n\n if \"pairs12\" in pairs_dict:\n for bond in bonds:\n pairs = sorted(bond.connection_members, key=sort_key)\n pairs_dict[\"pairs12\"].append(pairs)\n\n if \"pairs13\" in pairs_dict:\n for angle in angles:\n pairs = sorted(\n (angle.connection_members[0], angle.connection_members[-1]),\n key=sort_key,\n )\n if (\n pairs not in pairs_dict[\"pairs13\"]\n and shortest_path_length(graph, pairs[0], pairs[1]) == 2\n ):\n pairs_dict[\"pairs13\"].append(pairs)\n\n if \"pairs14\" in pairs_dict:\n for dihedral in dihedrals:\n pairs = sorted(\n (\n dihedral.connection_members[0],\n dihedral.connection_members[-1],\n ),\n key=sort_key,\n )\n if (\n pairs not in pairs_dict[\"pairs14\"]\n and shortest_path_length(graph, pairs[0], pairs[1]) == 3\n ):\n pairs_dict[\"pairs14\"].append(pairs)\n\n for key in pairs_dict:\n pairs_dict[key] = sorted(\n pairs_dict[key],\n key=lambda pairs: (sort_key(pairs[0]), sort_key(pairs[1])),\n )\n\n return pairs_dict", "def unzip(pairs):\n return tuple(zip(*pairs))", "def toPairs(self):\n result = Pairs()\n for first, second in enumerate(self):\n if first < second:\n result.append((first, second))\n return result", "def pairwise(iterable: Iterable[Any]) -> Sequence[Any]:\n a, b = tee(iterable)\n next(b, None)\n return [\"\".join(t) for t in zip(a, b)]", "def permutations(lst):\n pass # Replace this with your 
implementation of the function.", "def get_synset_pairs(synset: Synset) -> list:\n # Remove phrasal expressions from the literals\n literals = remove_phrases(synset.literals)\n\n # Generate a list of unique pairs representing the cartesian product of the list of literals of the single synset\n pairs = unique([tuple(sorted((w1, w2), key=itemgetter(0))) for w1 in literals for w2 in literals if not w1 == w2])\n return pairs", "def pairwise(iterable):\r\n a, b = itertools.tee(iterable)\r\n next(b, None)\r\n return itertools.izip(a, b)", "def _get_argument_combinations(arguments):\n arg_names = sorted(arguments)\n combinations = itertools.product(*(arguments[arg] for arg in arg_names))\n combinations = [dict(zip(arg_names, arg_values)) for arg_values in combinations]\n return combinations", "def swap_pairs(z):\n group_names = unique(z)\n a1 = [idx for idx, val in enumerate(z) if val == group_names[0]]\n a2 = [idx for idx, val in enumerate(z) if val == group_names[1]]\n pairs = [zip(x, a2) for x in itertools.permutations(a1, len(a1))]\n return [x for i in pairs for x in i]", "def part_2():\n return itertools.permutations(range(5, 10))", "def getmulticombos(peeps):\n\n\tret = []\n\n\tfor p in peeps:\n\t\tu,s = getcombos(p)\n\n\t\tbestu = getbesttriplet(u)\n\t\tbests = getbesttriplet(s)\n\n\t\tret.append((bestu, bests))\n\n\treturn ret", "def merge_pairs(lpairs):\n \n pairs = np.unique(np.vstack(lpairs), axis=0)\n return pairs", "def joint_pairs(self):\n return ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))", "def pairwise(iterable, include_tail=False):\n left, right = itertools.tee(iterable)\n next(right, None)\n if include_tail:\n right = itertools.chain(right, [None])\n\n return zip(left, right)", "def triples(self):\r\n\r\n if len(self.words) < 3:\r\n return\r\n\r\n for i in range(len(self.words) - 2):\r\n yield (self.words[i], self.words[i + 1], self.words[i + 2])", "def triplets(p):\n return [p[i:i+3] for i in range(3)]", "def get_2pairs():\n\n done = 0\n while not done:\n r0 = int(random(GRID_CELLS))\n c0 = int(random(GRID_CELLS))\n\n r1 = int(random(GRID_CELLS))\n c1 = int(random(GRID_CELLS))\n done = 1\n\n if random(1) < 0.5:\n # move one cell right\n ra1 = r0 + 1\n rb1 = r1 + 1\n ra0, rb0 = r0, r1\n ca0, cb0 = c0, c1\n ca1, cb1 = c0, c1\n\n if ra1 >= GRID_CELLS or rb1 >= GRID_CELLS:\n done = 0\n else: # move down:\n ca1 = c0 + 1\n cb1 = c1 + 1\n ca0, cb0 = c0, c1\n ra0, rb0 = r0, r1\n ra1, rb1 = r0, r1\n if ca1 >= GRID_CELLS or cb1 >= GRID_CELLS:\n done = 0\n\n return [((ra0, ca0), (rb0, cb0)), ((ra1, ca1), (rb1, cb1))]" ]
[ "0.7500274", "0.6851043", "0.6840614", "0.68141717", "0.6754132", "0.669949", "0.6683781", "0.6671177", "0.6668704", "0.65523124", "0.64457446", "0.64399433", "0.6394851", "0.636628", "0.6352524", "0.6351291", "0.6324047", "0.6315178", "0.6308765", "0.63008136", "0.62359875", "0.62253916", "0.62190825", "0.6217203", "0.6198219", "0.61822855", "0.61587", "0.61572254", "0.6155779", "0.6145708", "0.6067201", "0.60556483", "0.6049805", "0.60273594", "0.6026797", "0.6019757", "0.5976339", "0.5973855", "0.59554136", "0.59322715", "0.5920755", "0.59175295", "0.59175295", "0.59175295", "0.59175295", "0.5898667", "0.58860654", "0.5882408", "0.5882408", "0.5882408", "0.5877745", "0.5872893", "0.586403", "0.5863355", "0.5863355", "0.5845889", "0.5834372", "0.5832433", "0.58306247", "0.5825221", "0.58205825", "0.577348", "0.5756896", "0.57565135", "0.57363445", "0.57320845", "0.5731682", "0.5731682", "0.57287616", "0.5722647", "0.57202643", "0.5713608", "0.57083184", "0.57026476", "0.5692905", "0.5677053", "0.5675531", "0.5670164", "0.5667955", "0.5665883", "0.5664266", "0.5657612", "0.5657482", "0.5655444", "0.5639774", "0.5631574", "0.56296116", "0.5624988", "0.56091243", "0.55975026", "0.5592144", "0.55544984", "0.55484897", "0.55477244", "0.5547355", "0.55436426", "0.5542163", "0.5532898", "0.55059344", "0.55044854" ]
0.8119166
0
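The all_pairs function in the record above builds the pair list recursively; up to the order of the pairs, its output matches itertools.combinations of size 2. A short usage sketch, assuming the all_pairs definition from that record's document field is in scope:

from itertools import combinations

# all_pairs as defined in the record's document field above.
pairs = all_pairs([1, 2, 3, 4])   # -> [(3, 4), (2, 3), (2, 4), (1, 2), (1, 3), (1, 4)]
assert set(pairs) == set(combinations([1, 2, 3, 4], 2))
assert len(pairs) == 6            # n * (n - 1) / 2 pairs for n = 4

Several of the negatives above (for example pairwise and pairs) yield only consecutive pairs rather than all unordered pairs, which is the distinction between them and the positive document.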
Takes an optimisation step by calculating gradients given the loss and then updating the parameters.
def take_optimisation_step( self, optimizer, network, loss, clipping_norm=None, retain_graph=False ): if not isinstance(network, list): network = [network] # reset gradients to 0 optimizer.zero_grad() # this calculates the gradients loss.backward(retain_graph=retain_graph) if clipping_norm is not None: for net in network: # clip gradients to help stabilise training nn.utils.clip_grad_norm_(net.parameters(), clipping_norm) # this applies the gradients optimizer.step()
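The positive document above wraps the standard PyTorch update cycle: zero the accumulated gradients, backpropagate the loss, optionally clip the gradient norm of each network, then apply the step. A minimal standalone sketch of that same cycle follows; the model, optimizer and batch are illustrative assumptions for demonstration only, not taken from the dataset.

import torch
import torch.nn as nn

model = nn.Linear(4, 1)                                     # illustrative network
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)    # illustrative optimizer
x, y = torch.randn(8, 4), torch.randn(8, 1)                 # illustrative batch

loss = nn.functional.mse_loss(model(x), y)
optimizer.zero_grad()                                       # reset gradients to 0
loss.backward()                                             # compute gradients of the loss
nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # clip to help stabilise training
optimizer.step()                                            # apply the gradients

The retain_graph flag in the record's function only matters when the same computation graph is backpropagated through more than once; the sketch above uses the default single backward pass.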
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_parameters(self, loss):\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def update_params(self, loss, step_size=0.5, first_order=False):\n #grads = torch.autograd.grad(loss, self.parameters(),\n # create_graph=not first_order)\n self.optim.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm(self.parameters(), self.grad_clip_norm)\n self.optim.step()\n #updated_params = OrderedDict()\n #self.relation_emb.zero_grad()\n #self.entity_emb.zero_grad()\n #for (name, param), grad in zip(self.named_parameters(), grads):\n '''\n for (name, param) in self.named_parameters():\n updated_params[name] = param.clone()\n if param.grad is not None:\n updated_params[name] -= step_size * param.grad\n\n return updated_params\n '''", "def update_params(self, loss, step_size=0.5, first_order=False):\n grads = torch.autograd.grad(loss, self.parameters(),\n create_graph=not first_order)\n updated_params = OrderedDict()\n for (name, param), grad in zip(self.named_parameters(), grads):\n updated_params[name] = param - step_size * grad\n\n return updated_params", "def optimize_step(self, loss, glbl_step):\n Opt = locate(\"tensorflow.train.\" + hparams.optimizer)\n if Opt is None:\n raise ValueError(\"Invalid optimizer: \" + hparams.optimizer)\n optimizer = Opt(hparams.l_rate)\n grads_vars = optimizer.compute_gradients(loss)\n capped_grads = [(None if grad is None else tf.clip_by_value(grad, -1., 1.), var)\\\n for grad, var in grads_vars]\n take_step = optimizer.apply_gradients(capped_grads, global_step=glbl_step)\n return take_step", "def update(params: hk.Params, opt_state: OptState, batch, labels, xent_weight=self.weights, l1_coeff=self.l1_coef, l2_coeff=self.l2_coef) -> Tuple[hk.Params, OptState]:\n grads = jax.grad(loss)(params, batch, labels, xent_weight, l1_coeff, l2_coeff)\n updates, opt_state = opt.update(grads, opt_state)\n new_params = optax.apply_updates(params, updates)", "def updateParams(self,gradients):\n for i in xrange(len(self.params)):\n self.params[i].set_value(self.params[i].get_value()-gradients[i]/(1/self.learning_rate+self.iterations))", "def optimize(self, loss):\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_param(self, lr):\n\n\n self.W=self.W-lr*self.W_grad\n self.b = self.b - lr*self.b_grad", "def optimizer(self):\n \n # taken from https://github.com/germain-hug/Deep-RL-Keras/blob/master/DDPG/actor.py\n # I believe this is a work around to get keras to learn **given a gradient**\n # As opposed to bunch of x_train, y_trains?\n \n #Inputs\n state_pl = self.model.input\n action_grads_pl = K.placeholder(shape=(None,1)) \n \n #Find grad_(pars) mu(state)\n mu_pl = self.model.output\n pars = self.model.trainable_weights\n pars_grad_mu = tf.gradients(mu_pl, pars, -action_grads_pl)\n \n #grads_and_pars = zip(pars_grad_mu, pars) #keras needs this form\n #updates = tf.train.AdamOptimizer(self.lr).apply_gradients(grads_and_pars)\n\n # The gradients as defined above work on my mac, but not ubuntu.\n # Below I am trying a workaround. I changed the keras source code \n # To get this working. Specifically, I make the optimizer.get_updates()\n # function accept custom gradients. 
It was easy to do.\n \n opt = Adam(self.lr)\n loss = pars_grad_mu #placeholder, I won't use it\n updates = opt.get_updates(loss = loss, params = pars, grads = pars_grad_mu)\n\n return K.function(inputs = [state_pl, action_grads_pl], outputs = [], updates = updates)\n #return K.function(inputs = [state_pl, action_grads_pl], outputs = [updates])", "def update(self, gradient, optimizer=None, relink=None):\n # Recover the defaults, if missing\n optimizer = self._resolve_defaults(optimizer=optimizer)[0]\n # Set the gradient\n self.set_gradient(gradient, relink=(self._config.relink if relink is None else relink))\n # Perform the update step\n optimizer.step()", "def wasserstein_g_update(loss, optimizer, var_list=None, global_step=None, name='g_update'):\n # grads = optimizer.compute_gradients(loss, var_list=var_list)\n # return optimizer.apply_gradients(grads, global_step=global_step, name=name)\n\n return optimizer.minimize(loss, global_step=global_step, var_list=var_list, name=name)", "def apply_gradient(params: torch.Tensor, grads: torch.Tensor, lr: float) -> torch.Tensor:\n params_prime = params + lr * grads\n return params_prime", "def step(self, closure=None):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n beta1, beta2 = group['betas']\n base_lr = group['lr']\n max_norm = group['max_norm']\n fixed_norm = group['fixed_norm']\n zero_mean = group['zero_mean']\n normalizing_grad_by_norm = group['normalizing_grad_by_norm']\n eps = group['eps']\n weight_decay = group['weight_decay']\n l2_regularization = group['l2_regularization']\n bias_correction1 = 1\n for p in group['params']:\n if p.grad is None:\n continue\n pmax_norm, pfixed_norm, pzero_mean = _get_opt_args(\n p, max_norm, fixed_norm, zero_mean)\n lr = get_opt_arg(p, 'lr', base_lr)\n lr_scale = get_opt_arg(p, 'lr_scale', 1)\n state = self.state[p]\n state['step'] += 1\n bias_correction2 = 1 - beta2**state['step']\n grad = p.grad\n pweight_decay = get_opt_arg(p, 'weight_decay', weight_decay)\n pl2_regularization = get_opt_arg(p, 'l2_regularization',\n l2_regularization)\n if pl2_regularization != 0:\n grad = grad.add(p, alpha=pl2_regularization)\n if beta1 > 0:\n bias_correction1 = 1 - beta1**state['step']\n exp_avg = state['exp_avg']\n exp_avg.lerp_(grad, 1 - beta1)\n else:\n exp_avg = grad\n if normalizing_grad_by_norm:\n sq = _norm(grad)**2\n else:\n sq = grad**2\n state['exp_avg_sq'].lerp_(sq, 1 - beta2)\n denom = state['exp_avg_sq'].sqrt().add_(eps)\n if pweight_decay > 0:\n p.mul_(1 - lr_scale * lr * pweight_decay)\n # the exponential moving average of exp_avg and exp_avg_sq are not\n # unbiased estimate of the mean. 
Correct them using bas_correction1\n # and bias_correct2 as suggest by the original Adam paper.\n step_size = lr_scale * lr * math.sqrt(\n bias_correction2) / bias_correction1\n # p <- p - step_size * exp_avg / denom\n p.addcdiv_(exp_avg, denom, value=-step_size)\n _normalize(p, pmax_norm, pfixed_norm, pzero_mean)\n\n return loss", "def compute_grad(*, model: nn.Module, loss: Tensor) -> None:\n grad_list = torch.autograd.grad(loss, tuple(model.parameters()), retain_graph=True)\n\n for param, grad in zip(model.parameters(), grad_list):\n param.grad = grad", "def _apply_gradient_descent(self, gradients):\n updated_sd = {}\n global_model = self._get_global_model()\n \n for name, param, grad in zip(global_model.keys(), global_model.values(), gradients):\n updated_sd[name] = param - self.global_lr * grad\n \n self._load_global_model(updated_sd)", "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n apply_updates = self._opt.apply_gradients(grads_and_vars)\n with ops.control_dependencies([apply_updates]):\n local_update = state_ops.assign_add(\n self._local_step, 1, name='local_step_update').op\n\n # update global variables.\n def _Update_global_variables():\n local_vars = [v for g, v in grads_and_vars if g is not None]\n global_center_vars = [self._global_map[var] for var in local_vars]\n local_center_vars = [self._local_map[var] for var in local_vars]\n local_center_vars_update = []\n for lvar, var in zip(local_center_vars, global_center_vars):\n local_center_vars_update.append(lvar.assign(var))\n update_ops = []\n differences = []\n with ops.control_dependencies(local_center_vars_update):\n for v, lv in zip(local_vars, local_center_vars):\n with ops.device(v.device):\n differences.append(math_ops.subtract(v, lv))\n for lvar, diff in zip(local_vars, differences):\n with ops.device(lvar.device):\n update_ops.append(\n state_ops.assign_sub(lvar,\n math_ops.multiply(self._moving_rate,\n diff)))\n for var, diff in zip(global_center_vars, differences):\n with ops.device(var.device):\n update_ops.append(\n state_ops.assign_add(var,\n math_ops.multiply(self._moving_rate,\n diff)))\n if global_step:\n with ops.colocate_with(global_step):\n update_ops.append(state_ops.assign_add(global_step, 1))\n variable_update = control_flow_ops.group(*(update_ops))\n return variable_update\n\n with ops.control_dependencies([local_update]):\n condition = math_ops.equal(\n math_ops.mod(self._local_step, self._period), 0)\n conditional_update = control_flow_ops.cond(\n condition, _Update_global_variables, control_flow_ops.no_op)\n return conditional_update", "def update_parameters(parameters, grads, learning_rate=0.01):\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n ### END CODE HERE ###\n\n # Retrieve each gradient from the dictionary \"grads\"\n ### START CODE HERE ### (≈ 4 lines of code)\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n dW3 = grads[\"dW3\"]\n db3 = grads[\"db3\"]\n ## END CODE HERE ###\n\n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = W1 - (learning_rate * dW1)\n b1 = b1 - (learning_rate * db1)\n W2 = W2 - (learning_rate * dW2)\n b2 = b2 - (learning_rate * db2)\n W3 = W3 - (learning_rate * dW3)\n b3 = b3 - (learning_rate * db3)\n ### END CODE HERE ###\n\n parameters = {\"W1\": 
W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n\n return parameters", "def handle_gradient(self):\n self._optimizer.sync_grad()", "def optimize(self,gradients):\n\n for k in range(self.size):\n delta_weight = self.learningRate * gradients[k]\n full_change = delta_weight + self.momentum*self.last_change[k]\n self.weights[k] -= full_change\n self.last_change[k] = 1*gradients[k] #copy gradient mat", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.dtype in {torch.float16, torch.bfloat16}:\n grad = grad.float()\n if grad.is_sparse:\n raise RuntimeError('Adafactor does not support sparse gradients.')\n\n state = self.state[p]\n grad_shape = grad.shape\n\n factored, use_first_moment = self._get_options(group, grad_shape)\n # State Initialization\n if len(state) == 0:\n state['step'] = 0\n\n if use_first_moment:\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(grad)\n if factored:\n state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)\n state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)\n else:\n state['exp_avg_sq'] = torch.zeros_like(grad)\n\n state['RMS'] = 0\n else:\n if use_first_moment:\n state['exp_avg'] = state['exp_avg'].to(grad)\n if factored:\n state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)\n state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)\n else:\n state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)\n\n p_data_fp32 = p.data\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p_data_fp32 = p_data_fp32.float()\n\n state['step'] += 1\n state['RMS'] = self._rms(p_data_fp32)\n group['lr'] = self._get_lr(group, state)\n\n beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])\n update = (grad**2) + group['eps'][0]\n if factored:\n exp_avg_sq_row = state['exp_avg_sq_row']\n exp_avg_sq_col = state['exp_avg_sq_col']\n\n exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))\n exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))\n\n # Approximation of exponential moving average of square of gradient\n update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)\n update.mul_(grad)\n else:\n exp_avg_sq = state['exp_avg_sq']\n\n exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)\n update = exp_avg_sq.rsqrt().mul_(grad)\n\n update.div_(\n (self._rms(update) / group['clip_threshold']).clamp_(min=1.0)\n )\n update.mul_(group['lr'])\n\n if use_first_moment:\n exp_avg = state['exp_avg']\n exp_avg.mul_(group['beta1']).add_(1 - group['beta1'], update)\n update = exp_avg\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n p_data_fp32.add_(-update)\n\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p.data.copy_(p_data_fp32)\n\n return loss", "def step(self, closure=None):\n orig_loss,err,pred = closure()\n loss = orig_loss\n\n group = self.param_groups[0]\n lr = group['lr']\n decay_lr = group['decay_lr']\n max_iter = group['max_iter']\n reg = group['reg']\n backtrack = group['backtrack']\n bt_alpha = group['bt_alpha']\n bt_beta = group['bt_beta']\n sketch_size = group['sketch_size']\n tolerance = group['tolerance']\n\n #import pdb; pdb.set_trace()\n n = err.shape[0] #batch size\n #If sketching the jacobian, randomly select [sketch_size] samples\n \n if sketch_size is not None:\n idx = 
torch.randperm(n)[:sketch_size]\n else:\n idx = torch.arange(n) #Don't sketch, use all samples\n \n w0 = nn.utils.parameters_to_vector(self._params) #weight parameters in vector form\n \n #Compute Gauss-Newton vector product \n grad, ggnvp = _make_ggnvp(err,self._params,w0,n,reg,idx) #return gradient in vector form + ggnvp function\n #Solve for the Conjugate Gradient Direction\n dw, cost_log = _conjugate_gradient(ggnvp, grad, max_iter, tolerance)\n\n #Perform backtracking line search\n val = loss + 0.5 * reg * torch.norm(w0)**2\n fprime = -1*dw @ grad\n \n self.grad_update += 1\n if backtrack > 0:\n t = lr\n\n #TODO: If using backtracking, get new loss with (w0 - t*dw) as network parameters\n bts = 0\n alpha = bt_alpha\n beta = bt_beta \n while (loss + 0.5 * reg * torch.norm(w0 - t*dw)**2 > val + alpha * t * fprime):\n t = beta * t\n bts += 1\n if bts > backtrack:\n print('Maximum backtracking reached, accuracy not guaranteed')\n break\n elif decay_lr: #decay lr\n t = lr/np.maximum(1, self.grad_update-10)\n else: #use lr step-size\n t = lr\n\n print('step size: {}'.format(t))\n\n #Update the model parameters\n self._add_grad(-t, dw)\n \n return val, pred", "def add_grad_updates(self):\n \n gradients = T.grad(self.cost, self.theta)\n \n for target_param, grad in zip(self.theta, gradients):\n \n if target_param.name ==\"W\" and self.num_hidden ==0\\\n and self.zero_diag:\n \n grad = grad - T.diag(T.diag(grad)) # no x i - xi connections\n # for all i = 1, ..., D\n ##############################################################\n if target_param.name ==\"b\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n elif target_param.name ==\"bhid\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n else:\n \n if self.use_momentum:\n \n # alternative definition (mostly seen):\n #g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n #T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n #self.updates[target_param] = target_param + g_tilda\n \n g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n (1-self.momentum)*grad\n \n self.updates[target_param] = target_param +\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*g_tilda\n \n # store g_tilda for next iteration:\n self.updates[self.grad_vec[target_param.name]] = g_tilda\n \n else:\n \n self.updates[target_param] = target_param -\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n \n if (\"PCD\" in self.algorithm) and self.num_hidden > 0:\n \n self.updates[self.persistent_gibbs] = self.hid_samples", "def _step(self):\n # Make a minibatch of training data\n num_train = self.X_train.shape[0]\n # random choose the samples\n batch_mask = np.random.choice(num_train, self.batch_size)\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.loss_history.append(loss)\n\n # Perform a parameter update\n for p, w in self.model.params.items():\n dw = grads[p]\n config = self.optim_configs[p]\n next_w, next_config = self.update_rule(w, dw, config)\n self.model.params[p] = next_w\n self.optim_configs[p] = next_config", "def step(self, closure=None):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n lr = group[\"lr\"]\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n dampening = group[\"dampening\"]\n nesterov = group[\"nesterov\"]\n\n for p in 
group[\"params\"]:\n if p.grad is None:\n continue\n d_p = p.grad\n if weight_decay != 0:\n d_p = d_p.add(p, alpha=weight_decay)\n if momentum != 0:\n param_state = self.state[p]\n if \"momentum_buffer\" not in param_state:\n buf = param_state[\"momentum_buffer\"] = torch.clone(d_p).detach()\n buf.mul_(1 - dampening) # Added to scale buffer appropriately\n else:\n buf = param_state[\"momentum_buffer\"]\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n if nesterov:\n d_p = d_p.add(buf, alpha=momentum)\n else:\n d_p = buf\n\n p.add_(d_p, alpha=-lr)\n\n param_state = self.state[p]\n g = p.grad\n if \"step\" not in param_state:\n param_state[\"step\"] = 0\n param_state[\"buffers\"] = {}\n else:\n param_state[\"step\"] += 1\n buffer_dict = param_state[\"buffers\"]\n time = lr * (1 - dampening) * param_state[\"step\"]\n if \"sgd\" in self.save_buffers:\n self._sgd_buffers(time, d_p, buffer_dict)\n if \"mom\" in self.save_buffers:\n self._mom_buffers(time, d_p, buffer_dict)\n if \"grad\" in self.save_buffers:\n self._grad_buffers(time, d_p, buffer_dict)\n if \"grad_norm\" in self.save_buffers:\n self._grad_norm_buffers(time, d_p, buffer_dict)\n\n return loss", "def _train(self, loss):\n config = ConfigParser.ConfigParser()\n config.read(\"config/conf.cfg\")\n\n learning_rate =float(config.get(\"Common Params\", \"learning_rate\"))\n moment = float(config.get(\"Common Params\", \"moment\"))\n opt = tf.train.AdamOptimizer()\n train_step = opt.minimize(loss)\n return train_step\n\n # grads = opt.compute_gradients(self.total_loss)\n\n # apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)\n\n #return apply_gradient_op", "def step(self, closure: OptLossClosure = None) -> OptFloat:\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n\n if p.grad.is_sparse:\n msg = (\n 'SGDW does not support sparse gradients, '\n 'please consider SparseAdam instead'\n )\n raise RuntimeError(msg)\n\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(\n d_p\n ).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n # Apply momentum\n p.data.add_(-group['lr'], d_p)\n\n # Apply weight decay\n if weight_decay != 0:\n p.data.add_(-group['lr'], weight_decay)\n return loss", "def update(self, loss=None, inputs=None, targets=None, outputs=None):\n\n # TODO: add gradient accumulation\n\n self.optimizer.zero_grad(set_to_none=self.none_grad)\n\n if self.grad_scaler:\n self.grad_scaler.scale(loss).backward()\n self.grad_scaler.step(self.optimizer)\n\n if self.clip_grad:\n self.grad_scaler.unscale_(self.optimizer)\n self.clip_grad(self.model.parameters())\n self.grad_scaler.update()\n else:\n loss.backward()\n\n if self.clip_grad:\n self.clip_grad(self.model.parameters())\n\n self.optimizer.step()", "def _optimize(self, p_loss):\r\n \r\n self._optimizer.zero_grad()\r\n p_loss.backward()\r\n self._optimizer.step()", "def step(self, closure: Optional[Callable[[], float]] = None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = 
p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n \"Adam does not support sparse gradients, please \"\n \"consider SparseAdam instead\")\n\n state = self.state[p]\n # State initialization\n if len(state) == 0:\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['betas']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(1 - beta1, grad)\n next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n update = next_m / (next_v.sqrt() + group['eps'])\n\n # Just adding the square of the weights to the loss function is\n # *not* # the correct way of using L2 regularization or weight\n # decay with Adam, since that will interact with the m and v\n # parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't\n # interact with the m/v parameters. This is equivalent to adding\n # the square of the weights to the loss with plain\n # (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n update += group['weight_decay'] * p.data\n\n lr = group['lr']\n update_with_lr = lr * update\n p.data.add_(-update_with_lr)\n\n # No bias correction\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n return loss", "def optimize_parameters(self):\n self.loss_total.backward() # calculate gradients\n self.optimizer.step()\n self.optimizer.zero_grad()\n torch.cuda.empty_cache()", "def increment_grad(self, curr_loss, curr_state, hook=None):\n prev_loss = self._prev_loss\n prev_state = self._prev_state\n d_loss = None\n norm = None\n\n if self.loss:\n d_loss = curr_loss - prev_loss\n if d_loss > 0 and self.stabilizer:\n d_loss = -d_loss\n\n if self.norm:\n norm = compute_global_norm(curr_state, prev_state, d_loss)\n\n for n, p in self.state.items():\n if not p.requires_grad:\n continue\n\n curr_param = curr_state[n].detach()\n prev_param = prev_state[n].detach()\n prev_param_grad = prev_state[n].grad.detach()\n\n add = prev_param - curr_param\n if self.loss:\n add += -d_loss * prev_param_grad\n\n if self.norm:\n add.data.div_(norm)\n\n if hook is not None:\n hook(add)\n\n p.grad.add_(add)", "def optimize(self, loss, global_step, lr):\n tf.summary.scalar('learning_rate', lr)\n\n # Compute gradients\n # opt = tf.train.MomentumOptimizer(lr, option.MOMENTUM)\n # opt = tf.train.AdamOptimizer(lr)\n # opt = tf.train.RMSPropOptimizer(lr)\n optimizer = tf.train.GradientDescentOptimizer(lr)\n train_op = optimizer.minimize(loss,\n global_step=global_step,\n gate_gradients=optimizer.GATE_NONE)\n return train_op", "def sgd(params, grads, lr, batch_size): #@save\n for param, grad in zip(params, grads):\n param.assign_sub(lr*grad/batch_size)", "def optimize(self, learning_rate=0.0002, beta1=0.5, global_step=None):\n opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1)\n return opt.apply_gradients(self.gradients, global_step=global_step)", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n grad = p.grad\n continue\n else:\n grad =p.grad\n\n # s = self.spikes.popitem()\n #grad = 
jnp.matmul(p.grad, s.data)\n\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = jnp.zeros(grad.shape)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = jnp.zeros(grad.shape)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad += group['weight_decay'] * p.data\n\n # Decay the first and second moment running average coefficient\n exp_avg = exp_avg * beta1 + (1 - beta1) * grad\n exp_avg_sq = exp_avg_sq * beta2 + (1 - beta2) * grad * grad\n denom = jnp.sqrt(exp_avg_sq) + group['eps']\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = group['lr'] * jmath.sqrt(bias_correction2) / bias_correction1\n\n p.data += -step_size * exp_avg / denom\n\n return loss", "def _UpdateGradient(self):\n self.mol.GetGradient('analytic')", "def compute(self, step: int) -> None:\n if self.episode >= self._wp.training_delay:\n # Transfer network parameters if episode 0 or 100 * n.\n if self.episode % 100 == 0:\n self._policy.transfer_parameters()\n\n self.get_logger().info('Computing gradients...')\n if self.atype in ['REINFORCE', 'A2C']:\n batch = self._db.sample_batch(step, 'all')\n self._policy.train(batch, step)\n\n else:\n batch = self._db.sample_batch(self._wp.batch_size)\n self._policy.train(batch, self._wp.batch_size)\n\n else:\n self.get_logger().warn(\n 'Skipping computing gradients till episode ' +\n f'{self._wp.training_delay}!'\n )\n\n # Move to `push` section\n self.flag.shift_to_push_cycle()", "def step(self):\n if self.defaults['max_grad_norm'] > 0:\n device = self.param_groups[0]['params'][0].device\n global_grad_norm = torch.zeros(1, device=device)\n\n max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is not None:\n grad = p.grad\n global_grad_norm.add_(grad.pow(2).sum())\n\n global_grad_norm = torch.sqrt(global_grad_norm)\n\n clip_global_grad_norm = torch.clamp(max_grad_norm / (global_grad_norm + group['eps']), max=1.0)\n else:\n clip_global_grad_norm = 1.0\n\n for group in self.param_groups:\n beta1, beta2, beta3 = group['betas']\n # assume same step across group now to simplify things\n # per parameter step can be easily support by making it tensor, or pass list into kernel\n if 'step' in group:\n group['step'] += 1\n else:\n group['step'] = 1\n\n bias_correction1 = 1.0 - beta1 ** group['step']\n\n bias_correction2 = 1.0 - beta2 ** group['step']\n\n bias_correction3 = 1.0 - beta3 ** group['step']\n\n for p in group['params']:\n if p.grad is None:\n continue\n\n state = self.state[p]\n if len(state) == 0:\n state['exp_avg'] = torch.zeros_like(p)\n state['exp_avg_sq'] = torch.zeros_like(p)\n state['exp_avg_diff'] = torch.zeros_like(p)\n\n grad = p.grad.mul_(clip_global_grad_norm)\n if 'pre_grad' not in state or group['step'] == 1:\n state['pre_grad'] = grad\n\n copy_grad = grad.clone()\n\n exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff']\n diff = grad - state['pre_grad']\n\n update = grad + beta2 * diff\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t\n exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2) # diff_t\n exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3) # n_t\n\n denom = ((exp_avg_sq).sqrt() / 
math.sqrt(bias_correction3)).add_(group['eps'])\n update = ((exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2)).div_(denom)\n\n if group['no_prox']:\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\n p.add_(update, alpha=-group['lr'])\n else:\n p.add_(update, alpha=-group['lr'])\n p.data.div_(1 + group['lr'] * group['weight_decay'])\n\n state['pre_grad'] = copy_grad", "def _update_params(self, gradients: dict, learning_rate: float):\n L = len(self.activations)\n\n for l in range(L):\n self.params[\"W_\" + str(l + 1)] = self.params[\"W_\" + str(l + 1)] - learning_rate * gradients[\n \"dW\" + str(l + 1)]\n\n self.params[\"b_\" + str(l + 1)] = self.params[\"b_\" + str(l + 1)] - learning_rate * gradients[\n \"db\" + str(l + 1)]", "def step(self, batch_id, closure):\n loss = closure()\n dist_sq_acum = 0.0\n grad_dist_sq_acum = 0.0\n\n #print(\"step loss: \", loss)\n\n for group in self.param_groups:\n momentum = group['momentum']\n weight_decay = group['weight_decay']\n learning_rate = group['lr']\n\n for p in group['params']:\n if p.grad is None:\n continue\n gk = p.grad.data\n\n param_state = self.state[p]\n\n gktbl = param_state['gktbl']\n gavg = param_state['gavg'].type_as(p.data)\n tilde_x = param_state['tilde_x']\n\n if momentum != 0:\n buf = param_state['momentum_buffer']\n\n #########\n\n if self.vr_from_epoch is None or self.epoch < self.vr_from_epoch:\n vr_gradient = gk.clone() # Just do sgd steps\n else:\n gi = gktbl[batch_id, :].cuda()\n\n vr_gradient = gk.clone().sub_(gi - gavg)\n\n # Some diagnostics\n iterate_diff = p.data - tilde_x\n #pdb.set_trace()\n dist_sq_acum += iterate_diff.norm()**2 #torch.dot(iterate_diff,iterate_diff)\n grad_diff = gi - gk\n grad_dist_sq_acum += grad_diff.norm()**2 #torch.dot(grad_diff,grad_diff)\n\n if weight_decay != 0:\n vr_gradient.add_(weight_decay, p.data)\n\n if momentum != 0:\n dampening = 0.0\n vr_gradient = buf.mul_(momentum).add_(1 - dampening, vr_gradient)\n\n # Take step.\n p.data.add_(-learning_rate, vr_gradient)\n\n # Update running iterate mean:\n param_state['running_x'].mul_(self.running_interp).add_(1-self.running_interp, p.data)\n\n # track number of minibatches seen\n self.batches_processed += 1\n\n dist = math.sqrt(dist_sq_acum)\n grad_dist = math.sqrt(grad_dist_sq_acum)\n\n self.inrun_iterate_distances.append(dist)\n self.inrun_grad_distances.append(grad_dist)\n\n return loss", "def test_update_parameters(model):\n train_inputs = torch.tensor([[1., 2., 3.]])\n train_loss = 0.5 * (model(train_inputs) ** 2)\n\n params = gradient_update_parameters(model,\n train_loss,\n params=None,\n step_size=0.5,\n first_order=False)\n\n assert train_loss.item() == 264.5\n assert list(params.keys()) == ['weight']\n assert torch.all(params['weight'].data == torch.tensor([[-9.5, -20., -29.5]]))\n\n \"\"\"\n The new loss function (still with respect to the weights of the model w) is\n defined as:\n g(w) = 0.5 * (4 * w'_1 + 5 * w'_2 + 6 * w'_3) ** 2\n = 0.5 * (4 * (w_1 - 0.5 * df / dw_1)\n + 5 * (w_2 - 0.5 * df / dw_2)\n + 6 * (w_3 - 0.5 * df / dw_3)) ** 2\n = 0.5 * (4 * (w_1 - 0.5 * 1 * (1 * w_1 + 2 * w_2 + 3 * w_3))\n + 5 * (w_2 - 0.5 * 2 * (1 * w_1 + 2 * w_2 + 3 * w_3))\n + 6 * (w_3 - 0.5 * 3 * (1 * w_1 + 2 * w_2 + 3 * w_3))) ** 2\n = 0.5 * ((4 - 4 * 0.5 - 5 * 1.0 - 6 * 1.5) * w_1\n + (5 - 4 * 1.0 - 5 * 2.0 - 6 * 3.0) * w_2\n + (6 - 4 * 1.5 - 5 * 3.0 - 6 * 4.5) * w_3) ** 2\n = 0.5 * (-12 * w_1 - 27 * w_2 - 42 * w_3) ** 2\n\n Therefore the gradient of the function g with respect to w (and evaluated\n at w = [2, 3, 5]) 
is:\n dg / dw_1 = -12 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 3780\n dg / dw_2 = -27 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 8505\n dg / dw_3 = -42 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 13230\n \"\"\"\n test_inputs = torch.tensor([[4., 5., 6.]])\n test_loss = 0.5 * (model(test_inputs, params=params) ** 2)\n\n grads = torch.autograd.grad(test_loss, model.parameters())\n\n assert test_loss.item() == 49612.5\n assert len(grads) == 1\n assert torch.all(grads[0].data == torch.tensor([[3780., 8505., 13230.]]))", "def step(self, closure=None):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n momentum = group['momentum']\n beta = group['beta']\n eps = group['eps']\n clip_threshold = group['clip_threshold']\n for p in group['params']:\n if p is None:\n continue\n grad = p.grad\n\n state = self.state[p]\n shape = grad.shape\n rank = len(shape)\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['momentum_buffer'] = 0.\n _add_initial_accumulators(state, grad)\n\n if grad.is_sparse:\n # the update is non-linear so indices must be unique\n grad.coalesce()\n grad_indices = grad._indices()\n grad_values = grad._values()\n\n # Transform update_values into sparse tensor\n def make_sparse(values):\n constructor = grad.new\n if grad_indices.dim() == 0 or values.dim() == 0:\n return constructor().resize_as_(grad)\n return constructor(grad_indices, values, grad.size())\n\n acc = state[_key(0)]\n update_values = _compute_sparse_update(beta, acc, grad_values, grad_indices)\n self._update_sparse_accumulator(beta, acc, make_sparse(update_values))\n\n # Add small amount for numerical stability\n update_values.add_(eps).rsqrt_().mul_(grad_values)\n\n update = make_sparse(update_values)\n else:\n # Get previous accumulators mu_{t-1}\n if rank > 1:\n acc_list = [state[_key(i)] for i in range(rank)]\n else:\n acc_list = [state[_key(0)]]\n\n # Get update from accumulators and gradients\n update = _compute_update(beta, acc_list, grad, clip_threshold)\n\n # Update accumulators.\n self._update_accumulator(beta, acc_list, update)\n\n # Add small amount for numerical stability\n update.add_(eps).rsqrt_().mul_(grad)\n\n if momentum > 0.:\n m = state['momentum_buffer']\n update.mul_(1. - momentum).add_(m, alpha=momentum)\n state['momentum_buffer'] = update.detach()\n\n p.sub_(update, alpha=group['lr'])\n state['step'] += 1\n return loss", "def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 
2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)]-learning_rate* grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)]-learning_rate* grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters", "def update_lr(epoch, optimizer, args):\n gamma = 0\n for step in args.step:\n if epoch + 1.0 > int(step):\n gamma += 1\n lr = args.lr * math.pow(0.1, gamma)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def update_network(self, loss_dict):\r\n loss = sum(loss_dict.values())\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()", "def update_parameters(parameters, grads, learning_rate = 1.2):\n\t# Retrieve each parameter from the dictionary \"parameters\"\n\tW1 = parameters['W1']\n\tb1 = parameters['b1']\n\tW2 = parameters['W2']\n\tb2 = parameters['b2']\n\n\t# Retrieve each gradient from the dictionary \"grads\"\n\tdW1 = grads['dW1']\n\tdb1 = grads['db1']\n\tdW2 = grads['dW2']\n\tdb2 = grads['db2']\n\n\t# Update rule for each parameter\n\tW1 = W1 - learning_rate*dW1\n\tb1 = b1 - learning_rate*db1\n\tW2 = W2 - learning_rate*dW2\n\tb2 = b2 - learning_rate*db2\n\n\tparameters = {\"W1\": W1,\n\t\t\t\t\t\"b1\": b1,\n\t\t\t\t\t\"W2\": W2,\n\t\t\t\t\t\"b2\": b2}\n\n\treturn parameters", "def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()", "def grad_step(\n *,\n optimizer: Optimizer,\n optimizer_wds: List[float] = 0,\n grad_clip_fn: Callable = None\n ):\n for group, wd in zip(optimizer.param_groups, optimizer_wds):\n if wd > 0:\n for param in group[\"params\"]:\n param.data = param.data.add(-wd * group[\"lr\"], param.data)\n if grad_clip_fn is not None:\n grad_clip_fn(group[\"params\"])\n optimizer.step()", "def stiefel_update(self, param, constraints, grad, moment, lr):\n p = param\n m = moment\n g = grad\n\n new_g = g - K.dot(p, matrix_sym_op(K.dot(K.transpose(p), g)))\n\n v = self.momentum * m - lr * new_g # velocity\n # v.name = p.name + '_v'\n # m.name = p.name + '_m'\n self.updates.append(K.update(m, v))\n\n\n # if self.nesterov:\n # new_p = p + self.momentum * v - lr * new_g\n # else:\n new_p = p + v\n p_shape = new_p.get_shape()\n if p_shape[0]._value > p_shape[1]._value:\n new_p, _ = tf.qr(new_p, full_matrices=False)\n else:\n new_p, _ = tf.qr(tf.transpose(new_p), full_matrices=False)\n new_p = tf.transpose(new_p)\n # apply constraints\n if p in constraints:\n c = constraints[p]\n new_p = c(new_p)\n\n self.updates.append(K.update(p, new_p))", "def perform_update(self, gradient):\n w = sys.modules[self.shared_mem_name].__dict__[\"w\"]\n w -= self.learning_rate * gradient", "def _optimize(optimizer, regularization_losses, scope, **kwargs):\n sum_loss = _gather_loss(regularization_losses, scope)\n grad = None\n if sum_loss is not None:\n grad = optimizer.compute_gradients(sum_loss, **kwargs)\n return sum_loss, grad", "def update_step(image_batch, label_batch, model, learning_rate):\n f = model.forward(image_batch)\n gradient = model.backward(f,label_batch)\n model.w = model.w - learning_rate*gradient", "def on_batch_end(self, state: _State):\n if not state.need_backward_pass:\n return\n\n loss = state.batch_metrics[self.loss_key]\n optimizer = self._optimizer\n\n self._accumulation_counter += 1\n need_gradient_step = \\\n (self._accumulation_counter + 1) % self.accumulation_steps == 0\n\n # This is very hacky check whether we have AMP optimizer and this may\n # change in future.\n # But alternative solution is to have AmpOptimizerCallback.\n # or 
expose another c'tor argument.\n if hasattr(optimizer, \"_amp_stash\"):\n from apex import amp\n # Need to set ``delay_unscale``\n # according to\n # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n delay_unscale = not need_gradient_step\n with amp.scale_loss(\n loss, optimizer, delay_unscale=delay_unscale\n ) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n if need_gradient_step:\n self.grad_step(\n optimizer=optimizer,\n optimizer_wds=self._optimizer_wd,\n grad_clip_fn=self.grad_clip_fn\n )\n\n # if self.save_model_grads:\n # for tag, value in model.named_parameters():\n # tag = tag.replace(\".\", \"/\")\n # state.model_grads[tag] = value.grad.cpu().numpy()\n\n utils.maybe_recursive_call(optimizer, \"zero_grad\")\n\n self._accumulation_counter = 0", "def gradients_to_updates(self, params, grads):\n return NotImplementedError('Abstract class method')", "def update_parameters(parameters, grads, learning_rate):\n pass", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(1 - beta1, grad)\n next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n update = next_m / (next_v.sqrt() + group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n update += group['weight_decay'] * p.data\n\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])\n else:\n lr_scheduled = group['lr']\n\n update_with_lr = lr_scheduled * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n return loss", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(1 - beta1, grad)\n next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n update = next_m / (next_v.sqrt() + group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n update += group['weight_decay'] * p.data\n\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])\n else:\n lr_scheduled = group['lr']\n\n update_with_lr = lr_scheduled * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1\n # No bias correction\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n return loss", "def __call__(self, step, inputs, hparams={}):\n\t\tstep+=1\n\n\t\t# apply gradient tape\n\t\ttape, loss = self.tape_gradients(inputs, **hparams)\n\t\t\n\t\t# check nan in loss\n\t\tif np.isnan(loss.numpy()):\n\t\t\tstring=\"Nan Loss on step %s:\\t rec loss = %s\\t, reg loss = %s\\t\" % (\n\t\t\t\tstep, \n\t\t\t\tself.reconstruction_loss.numpy(),\n\t\t\t\tself.regularization_loss.numpy(),\n\t\t\t\t)\n\t\t\treturn np.nan\n\n\t\t# optimize\n\t\tself.run_optimizer(tape, loss)\n\n\t\tprint('\\033[Kstep %s:\\t rec loss = %s\\t, reg loss = %s\\t' % (\n\t\t\tstep, \n\t\t\tself.reconstruction_loss.numpy(),\n\t\t\tself.regularization_loss.numpy(),\n\t\t\t), \"\\r\", end=\"\")\n\n\t\treturn step", "def sgd_optim(config = None, global_step = None):\n learning_rate = config[\"learning_rate\"]\n \n train_step = tf.train.GradientDescentOptimizer(learning_rate)\n #train_step = tf.train.GradientDescentOptimizer(learning_rate)\n return train_step", "def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself._W = self._W - learning_rate * self._grad_W_current\n\t\tself._b = self._b - learning_rate * self._grad_b_current\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad\n if grad.is_sparse:\n raise RuntimeError(\"RMSpropTF does not support sparse gradients\")\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # PyTorch initialized to zeros here\n state[\"square_avg\"] = torch.ones_like(p, memory_format=torch.preserve_format)\n if group[\"momentum\"] > 0:\n state[\"momentum_buffer\"] = torch.zeros_like(p, memory_format=torch.preserve_format)\n if group[\"centered\"]:\n state[\"grad_avg\"] = torch.zeros_like(p, memory_format=torch.preserve_format)\n\n square_avg = state[\"square_avg\"]\n alpha = group[\"alpha\"]\n\n state[\"step\"] += 1\n\n if group[\"weight_decay\"] != 0:\n grad = grad.add(p, alpha=group[\"weight_decay\"])\n\n square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)\n\n if group[\"centered\"]:\n grad_avg = state[\"grad_avg\"]\n grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha)\n # PyTorch added epsilon after square root\n # avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_().add_(group['eps'])\n avg = square_avg.addcmul(grad_avg, grad_avg, 
value=-1).add_(group[\"eps\"]).sqrt_()\n else:\n # PyTorch added epsilon after square root\n # avg = square_avg.sqrt().add_(group['eps'])\n avg = square_avg.add(group[\"eps\"]).sqrt_()\n\n if group[\"momentum\"] > 0:\n buf = state[\"momentum_buffer\"]\n buf.mul_(group[\"momentum\"]).addcdiv_(grad, avg)\n p.add_(buf, alpha=-group[\"lr\"])\n else:\n p.addcdiv_(grad, avg, value=-group[\"lr\"])\n\n return loss", "def update_gradients(self, dw, db):\n self.w = self.w - self.lr * dw\n self.b = self.b - (self.lr * db)", "def compute_loss_and_gradients(self, X, y):\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model", "def _optimize(self):\n # Retrieve all trainable variables\n train_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n # Compute the gradient (return a pair of variable and their respective gradient)\n grads = self.optimizer.compute_gradients(loss=self.loss, var_list=train_variables)\n self.train_dis = self.optimizer.apply_gradients(grads, global_step=self.global_step)", "def gradient_update(self, states, Q_target):\n params = [self.w1, self.w2, self.w3, self.b1, self.b2, self.b3]\n loss = self.get_loss(states, Q_target)\n gradients = nn.gradients(loss, params)\n self.w1.update(gradients[0], -self.learning_rate)\n self.w2.update(gradients[1], -self.learning_rate)\n self.w3.update(gradients[2], -self.learning_rate)\n self.b1.update(gradients[3], -self.learning_rate)\n self.b2.update(gradients[4], -self.learning_rate)\n self.b3.update(gradients[5], -self.learning_rate)", "def backward_and_step(self, loss):\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()", "def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads['dW' + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * grads['db' + str(l+1)]\n \n return parameters", "def optimization(err_acc, learning_rate):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n tvars = tf.trainable_variables()\n grads = tf.gradients(err_acc, tvars)\n tg_pairs = [(tf.clip_by_value(k[0], -100, 100), k[1]) for k in zip(grads, tvars) if k[0] is not None]\n train_op = optimizer.apply_gradients(tg_pairs)\n return train_op", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n lr = group[\"lr\"]\n weight_decay = group[\"weight_decay\"]\n momentum = group['momentum']\n dampening = group[\"dampening\"]\n nesterov = group[\"nesterov\"]\n maximize = group[\"maximize\"]\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n\n grad = p.grad\n\n if grad.is_sparse:\n raise RuntimeError(\"SGD does not support sparse gradients.\")\n\n grad = grad if not maximize else -grad\n\n if weight_decay != 0:\n grad = grad.add(p, alpha=weight_decay)\n\n if momentum != 0:\n buf = self.state[p][\"momentum_buffer\"]\n\n buf.mul_(momentum).add_(grad, alpha=1.0 - dampening)\n\n if nesterov:\n grad.add_(buf, alpha=momentum)\n else:\n grad = buf\n\n p.add_(-lr * grad)\n\n return loss", "def train_step(model, x, optimizer):\n if model.architecture == \"VPGA\":\n with tf.GradientTape(persistent=True) as tape:\n enc_loss, dec_loss = model.compute_loss(x)\n loss = enc_loss + dec_loss\n enc_grads = tape.gradient(enc_loss, model.trainable_variables)\n 
dec_grads = tape.gradient(dec_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(enc_grads + dec_grads, model.trainable_variables))\n del tape\n else:\n with tf.GradientTape() as tape:\n loss = model.compute_loss(x)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return loss", "def train_step(model, x, optimizer):\n with tf.GradientTape() as tape:\n loss = compute_loss(model, x)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))", "def step(self, closure=None):\n self.steps += 1\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n k = group['k']\n alpha_k = group['alpha_k']\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('RMSprop does not support sparse gradients')\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['square_avg'] = torch.zeros_like(p.data)\n if group['momentum'] > 0:\n state['momentum_buffer'] = torch.zeros_like(p.data)\n if group['centered']:\n state['grad_avg'] = torch.zeros_like(p.data)\n\n square_avg = state['square_avg']\n alpha = group['alpha']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)\n\n if group['centered']:\n grad_avg = state['grad_avg']\n grad_avg.mul_(alpha).add_(1 - alpha, grad)\n avg = square_avg.addcmul(-1, grad_avg, grad_avg).sqrt_().add_(group['eps'])\n else:\n avg = square_avg.sqrt().add_(group['eps'])\n\n if group['momentum'] > 0:\n buf = state['momentum_buffer']\n buf.mul_(group['momentum']).addcdiv_(grad, avg)\n p.data.add_(-group['lr'], buf)\n else:\n p.data.addcdiv_(-group['lr'], grad, avg)\n # ---- Lookahead Algorithm ----\n if 'slow_weight' not in state:\n state['slow_weight'] = p.data.clone().detach()\n if self.steps % k == 0:\n p.data = state['slow_weight'] + alpha_k * (p.data - state['slow_weight'])\n state['slow_weight'] = p.data.clone().detach()", "def update(self):\n\n # Update W (gradient should be up-to-date)\n _projected_step(self.W, self.gW, 1.0 / self.lipschitz_W())\n\n # Update H (need to recompute residuals since W was updated).\n self.cache_resids()\n self.cache_gH()\n _projected_step(self.H, self.gH, self.step_size)\n\n # Update residuals and gradient computation for W (for next iteration).\n self.cache_resids()\n self.cache_gW()\n\n # Return loss\n return self.loss", "def train_step(model, x, optimizer):\n with tf.GradientTape() as tape:\n loss = compute_loss(model, x)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return loss", "def step(self, layers, d_weights, d_biases, lr):\n for layer, d_W, d_b in zip(layers, d_weights, d_biases):\n layer.update(-lr * d_W, -lr * d_b)", "def pgd_optimizer(self, sess, X, y, optimization_step, num_iter, loss, delta, last = False, featurized_X = None):\n feed_dict = None\n if not last:\n feed_dict = {self.x: X, self.y: y}\n sess.run(tf.initialize_variables([delta]), feed_dict = feed_dict)\n else:\n feed_dict = {self.x : X, self.featurizations: featurized_X, self.y: y}\n sess.run(tf.initialize_variables([delta]), feed_dict = feed_dict)\n feed_dict = {self.featurizations: featurized_X, self.y: y}\n\n for i in range(num_iter):\n print(\"iteration: 
%d\"%i)\n sess.run(optimization_step, feed_dict = feed_dict)\n loss_adv = sess.run(loss, feed_dict = feed_dict)\n print(\"loss %f\" %loss_adv)\n return True", "def update_parameters(self, parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural network\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n self.t += 1\n\n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n self.v[\"dW\" + str(l + 1)] = self.beta1 * self.v[\"dW\" + str(l + 1)] + (1 - self.beta1) * grads['dW' + str(l + 1)]\n self.v[\"db\" + str(l + 1)] = self.beta1 * self.v[\"db\" + str(l + 1)] + (1 - self.beta1) * grads['db' + str(l + 1)]\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n v_corrected[\"dW\" + str(l + 1)] = self.v[\"dW\" + str(l + 1)] / (1 - np.power(self.beta1, self.t))\n v_corrected[\"db\" + str(l + 1)] = self.v[\"db\" + str(l + 1)] / (1 - np.power(self.beta1, self.t))\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n self.s[\"dW\" + str(l + 1)] = self.beta2 * self.s[\"dW\" + str(l + 1)] + (1 - self.beta2) * np.power(grads['dW' + str(l + 1)], 2)\n self.s[\"db\" + str(l + 1)] = self.beta2 * self.s[\"db\" + str(l + 1)] + (1 - self.beta2) * np.power(grads['db' + str(l + 1)], 2)\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n s_corrected[\"dW\" + str(l + 1)] = self.s[\"dW\" + str(l + 1)] / (1 - np.power(self.beta2, self.t))\n s_corrected[\"db\" + str(l + 1)] = self.s[\"db\" + str(l + 1)] / (1 - np.power(self.beta2, self.t))\n\n # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". Output: \"parameters\".\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * v_corrected[\"dW\" + str(l + 1)] / np.sqrt(self.s[\"dW\" + str(l + 1)] + self.epsilon)\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * v_corrected[\"db\" + str(l + 1)] / np.sqrt(self.s[\"db\" + str(l + 1)] + self.epsilon)\n\n return parameters", "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n # This is a default implementation of apply_gradients() that can be shared\n # by most optimizers. 
It relies on the subclass implementing the following\n # methods: _create_slots(), _prepare(), _apply_dense(), and _apply_sparse().\n\n base_update_op = super(JTPSOptimizer, self).apply_gradients(\n grads_and_vars,\n global_step=global_step,\n name=name\n )\n\n with tf.control_dependencies([base_update_op]):\n fn, _ = self.get_constraint_fn()\n lambda_update_op = []\n\n if self.granularity == 'global':\n lambda_grad = 0\n for g, v in grads_and_vars:\n if g is not None:\n delta_prev = self.get_slot(v, 'delta')\n v_cur = self.get_slot(v, 'theta')\n lambda_v = self.get_lambda(v)\n\n v_cur_from_prev = v_cur + fn(lambda_v) * delta_prev\n lambda_grad_cur = g * tf.gradients(v_cur_from_prev, lambda_v)[0]\n lambda_grad += tf.reduce_sum(lambda_grad_cur)\n\n lambda_update_op.append(\n self.lambda_optimizer._apply_dense(lambda_grad, self.global_lambda)\n )\n else:\n for g, v in grads_and_vars:\n if g is not None:\n delta_prev = self.get_slot(v, 'delta')\n v_cur = self.get_slot(v, 'theta')\n lambda_v = self.get_lambda(v)\n\n v_cur_from_prev = v_cur + fn(lambda_v) * delta_prev\n lambda_grad_cur = g * tf.gradients(v_cur_from_prev, lambda_v)[0]\n if self.granularity == 'variable':\n lambda_grad_cur = tf.reduce_sum(lambda_grad_cur)\n else:\n assert self.granularity == 'cell', 'Unrecognized granularity type \"%s\"' % self.granularity\n\n lambda_update_op += [\n self.lambda_optimizer._apply_dense(lambda_grad_cur, lambda_v),\n ]\n\n lambda_update_op = control_flow_ops.group(*lambda_update_op)\n with tf.control_dependencies([lambda_update_op]):\n update_op = []\n for g, v in grads_and_vars:\n if g is not None:\n v_cur = self.get_slot(v, 'theta')\n delta_prev = self.get_slot(v, 'delta')\n delta_cur = delta_prev.assign(v - v_cur)\n if self.granularity == 'global':\n lambda_cur = self.global_lambda\n else:\n lambda_cur = self.get_slot(v, 'lambda')\n\n v_new = v_cur + fn(lambda_cur) * delta_cur\n\n update_op += [\n state_ops.assign(v, v_new),\n ]\n\n update_op = control_flow_ops.group(*update_op)\n\n return update_op", "def step(self):\n loss = None\n for group in self.param_groups:\n for p in group['params']:\n grad = p.grad.data\n state = self.state[p]\n\n if len(state) == 0:\n t = 0\n m = torch.zeros_like(p.data)\n v = torch.zeros_like(p.data)\n # v_hat = torch.zeros_like(p.data)\n else:\n t = state['t']\n m = state['m']\n v = state['v']\n\n b1 = group['beta1']\n b2 = group['beta2']\n t += 1\n\n m = torch.mul(m, b1) + (1-b1) * grad\n v = torch.mul(v, b2) + (1-b2) * grad**2\n\n m_unbias = 1 / (1 - b1**t)\n v_unbias = 1 / (1 - b2**t)\n\n p.data -= (group['lr'] * m_unbias / math.sqrt(v_unbias)) * \\\n m / (math.sqrt(v_unbias) + group['eps'])\n\n # v_hat = torch.max(v_hat, v)\n # p.data -= group['lr'] / m_unbias * m * v_hat / (v_unbias.sqrt() + group['eps'])\n state['t'] = t\n state['m'] = m\n state['v'] = v\n\n return loss", "def step(self, closure=None):\n self.steps += 1\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n k = group['k']\n alpha = group['alpha']\n for p in group['params']:\n if p.grad is None:\n continue\n\n # Perform stepweight decay\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\n\n # Perform optimization step\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = 
torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n else:\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n\n step_size = group['lr'] / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n # ---- Lookahead Algorithm ----\n if 'slow_weight' not in state:\n state['slow_weight'] = p.data.clone().detach()\n if self.steps % k == 0:\n p.data = state['slow_weight'] + alpha * (p.data - state['slow_weight'])\n state['slow_weight'] = p.data.clone().detach()\n state['exp_avg'].fill_(0)\n state['exp_avg_sq'].fill_(0)\n\n return loss", "def step(self):\n for p, grad, v, square_grad_avg, delta_x_acc in self.params:\n # Compute the running average of the squared gradients \n square_grad_avg.mul_(self.rho)\n square_grad_avg.addcmul_(grad, grad, value = 1 - self.rho)\n # Compute the RMS of the previous squared gradients (eps to avoid numerical issues later for division)\n std = (square_grad_avg.add_(self.eps)).sqrt_()\n # Compute the accumulated update\n delta_x = ((delta_x_acc.add_(self.eps)).sqrt_()) * grad / std\n # Accumulate the updates\n delta_x_acc.mul_(self.rho)\n delta_x_acc.addcmul_(delta_x, delta_x, value = 1 - self.rho) \n # Update the parameters\n p.add_(delta_x, alpha = - self.lr)", "def apply_gradient(self, learning_rate):\n raise NotImplementedError()", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = grad.new().resize_as_(grad).zero_()\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n if group['l1_weight_decay'] != 0:\n grad = grad.add(group['l1_weight_decay'], torch.sign(p.data))\n if group['l2_weight_decay'] != 0:\n grad = grad.add(group['l2_weight_decay'], p.data)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n # custom clamping\n if 
group['lower_bound'] is not None:\n p.data.clamp_(min=group['lower_bound'])\n if group['upper_bound'] is not None:\n p.data.clamp_(max=group['upper_bound'])\n\n return loss", "def adapt(self, loss, step=None, first_order=None, allow_unused=None, allow_nograd=None):\n if first_order is None:\n first_order = self.first_order\n if allow_unused is None:\n allow_unused = self.allow_unused\n if allow_nograd is None:\n allow_nograd = self.allow_nograd\n second_order = not first_order\n\n gradients = []\n if allow_nograd:\n # Compute relevant gradients\n diff_params = [p for p in self.module.parameters() if p.requires_grad]\n grad_params = grad(\n loss,\n diff_params,\n retain_graph=second_order,\n create_graph=second_order,\n allow_unused=allow_unused,\n )\n grad_counter = 0\n\n # Handles gradients for non-differentiable parameters\n for param in self.module.parameters():\n if param.requires_grad:\n gradient = grad_params[grad_counter]\n grad_counter += 1\n else:\n gradient = None\n gradients.append(gradient)\n else:\n try:\n gradients = grad(\n loss,\n self.module.parameters(),\n retain_graph=second_order,\n create_graph=second_order,\n allow_unused=allow_unused,\n )\n except RuntimeError:\n traceback.print_exc()\n print(\n \"learn2learn: Maybe try with allow_nograd=True and/or allow_unused=True ?\"\n )\n\n # Update the module\n assert step is not None, \"step cannot be None when using LSLR!\"\n self.module = maml_pp_update(self.module, step, lrs=self.lrs, grads=gradients)", "def adversarial_update(model, optimiser, loss_fn, x, y, epoch, eps, step, k, norm, **kwargs):\n model.train()\n\n # Adversial perturbation\n if norm == 'inf':\n x_adv = iterated_fgsm(model, x, y, loss_fn, k=k, step=step, eps=eps, norm='inf', random=args.random_start)\n elif norm == 2:\n x_adv = pgd(model, x, y, loss_fn, k=k, step=step, eps=eps, norm=2, random=args.random_start)\n else:\n raise ValueError('Unsupported norm')\n\n optimiser.zero_grad()\n y_pred = model(x_adv)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimiser.step()\n\n return loss, y_pred", "def update(\r\n params: hk.Params,\r\n opt_state: OptState,\r\n batch, label, agreement\r\n ) -> Tuple[hk.Params, OptState]:\r\n # grads = jax.grad(loss)(params, batch, label)\r\n # grads_masked = (gradient_per_sample if use_ilc else gradient)(params, batch, label) # (gradient_per_sample)(params, batch, label)\r\n # sum_grad_masked_regularized = jax.tree_multimap(lambda x,y:x+y,grads_masked,gradient_reg(params))\r\n # grads = sum_grad_masked_regularized\r\n # updates, opt_state = opt.update(grads, opt_state)\r\n # new_params = optax.apply_updates(params, updates)\r\n\r\n grads_samples = gradient_per_sample(params, batch, label)\r\n ANDmask = and_mask(agreement)\r\n\r\n masked_grads,_ = ANDmask.update(grads_samples, opt_state)\r\n reg_grads = gradient_reg(params)\r\n\r\n sum_grad_masked_regularized = jax.tree_multimap(lambda x,y:x+y,masked_grads,reg_grads)\r\n \r\n updates,_ = opt.update(sum_grad_masked_regularized, opt_state)\r\n\r\n new_params = optax.apply_updates(params, updates)\r\n\r\n return new_params, opt_state", "def EvaluateGradient(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def optimize(w, b, X, Y, num_iterations,learning_rate,print_cost = False):\n costs = []\n for i in range(num_iterations):\n\n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ###\n grads,cost = propagate(w,b,X,Y)\n ### END CODE HERE ###\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # update 
rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate*dw\n b = b - learning_rate*db\n ### END CODE HERE ###\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training examples\n if print_cost and i%100==0:\n print(\"Cost after iteration %i: %f\"%(i,cost))\n\n params = {\n \"w\":w,\n \"b\":b\n }\n grads = {\n \"dw\":dw,\n \"db\":db\n }\n return params,grads,costs", "def _train_step(self, params, sim_data):\n \n # Compute loss and store gradients\n with tf.GradientTape() as tape:\n loss = self.loss(self.network, params, sim_data)\n \n # One step backprop\n gradients = tape.gradient(loss, self.network.trainable_variables)\n self._apply_gradients(gradients, self.network.trainable_variables) \n \n return loss.numpy()", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n amsgrad = group['amsgrad']\n\n # state = self.state[p]\n\n # # State initialization\n # if len(state) == 0:\n # state['step'] = 0\n # # Exponential moving average of gradient values\n # state['exp_avg'] = torch.zeros_like(p.data)\n # # Exponential moving average of squared gradient values\n # state['exp_avg_sq'] = torch.zeros_like(p.data)\n # if amsgrad:\n # # Maintains max of all exp. moving avg. of sq. grad. values\n # state['max_exp_avg_sq'] = torch.zeros_like(p.data)\n\n # exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n # if amsgrad:\n # max_exp_avg_sq = state['max_exp_avg_sq']\n # beta1, beta2 = group['betas']\n\n # state['step'] += 1\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n # if group['weight_decay'] != 0:\n # grad.add_(group['weight_decay'], p.data)\n\n # # Decay the first and second moment running average coefficient\n # exp_avg.mul_(beta1).add_(1 - beta1, grad)\n # exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n # if amsgrad:\n # # Maintains the maximum of all 2nd moment running avg. till now\n # torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # # Use the max. for normalizing running avg. of gradient\n # denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n # else:\n # denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n\n # step_size = group['lr'] / bias_correction1\n\n # p.data.addcdiv_(-step_size, exp_avg, denom)\n\n return loss", "def optimize(self, loss):\n \n optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)\n #optimizer = tf.train.AdagradOptimizer(learning_rate=self.lr)\n return optimizer.minimize(loss)", "def backward_pass(self, loss):\n\n self.optimizer.zero_grad()\n self.optimizer.backward(loss)\n self.optimizer.step()", "def maml_pp_update(model, step=None, lrs=None, grads=None):\n if grads is not None and lrs is not None:\n params = list(model.parameters())\n if not len(grads) == len(list(params)):\n msg = \"WARNING:maml_update(): Parameters and gradients have different length. (\"\n msg += str(len(params)) + \" vs \" + str(len(grads)) + \")\"\n print(msg)\n # TODO: Why doesn't this work?? I can't assign p.grad when zipping like this... 
Is this\n # because I'm using a tuple?\n # for named_param, g in zip(\n # [(k, v) for k, l in model.named_parameters() for v in l], grads\n # ):\n # p_name, p = named_param\n it = 0\n for name, p in model.named_parameters():\n if grads[it] is not None:\n lr = None\n layer_name = name[: name.rfind(\".\")].replace(\n \".\", \"-\"\n ) # Extract the layer name from the named parameter\n lr = lrs[layer_name][step]\n assert (\n lr is not None\n ), f\"Parameter {name} does not have a learning rate in LSLR dict!\"\n p.grad = grads[it]\n p._lr = lr\n it += 1\n\n # Update the params\n for param_key in model._parameters:\n p = model._parameters[param_key]\n if p is not None and p.grad is not None:\n model._parameters[param_key] = p - p._lr * p.grad\n p.grad = None\n p._lr = None\n\n # Second, handle the buffers if necessary\n for buffer_key in model._buffers:\n buff = model._buffers[buffer_key]\n if buff is not None and buff.grad is not None and buff._lr is not None:\n model._buffers[buffer_key] = buff - buff._lr * buff.grad\n buff.grad = None\n buff._lr = None\n\n # Then, recurse for each submodule\n for module_key in model._modules:\n model._modules[module_key] = maml_pp_update(model._modules[module_key])\n return model", "def train_step(model,x,optimizer):\r\n\twith tf.GradientTape() as tape:\r\n\t\tloss = compute_loss(model, x)\r\n\t\tgradients = tape.gradient(loss, model.trainable_variables)\r\n\t\toptimizer.apply_gradients(zip(gradients, model.trainable_variables))\r\n\treturn loss", "def train_step(model, optimizer, x):\n\n with tf.GradientTape() as tape:\n loss, RE, KL = model.loss(x, average=True)\n\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n return loss, RE, KL", "def train_step(self, loss, flags):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n # Define global step counter\n\n optimizer = flags['optimizer']\n global_step = flags['global_step']\n\n # For batch-norm\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n # Gradient clipping\n grads = optimizer.compute_gradients(loss)\n [self._gradient_summary(var, grad, 'grad') for var, grad in grads]\n\n if flags['grad_clipping']:\n grads = [(tf.clip_by_value(grad, -1., 1.), tvar) for grad, tvar in\n grads if grad is not None]\n\n [self._gradient_summary(var, grad, 'clipped_grad') for var, grad in grads]\n\n train_step = optimizer.apply_gradients(grads_and_vars=grads, global_step=global_step)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return train_step", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. 
values\n state['max_exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad.add_(group['weight_decay'], p.data)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = -group['lr'] * math.sqrt(bias_correction2) / bias_correction1 * exp_avg / denom\n\n if group[\"hyperboloid\"]:\n self.expm(p.data, step_size)\n else:\n p.data.add_(step_size)\n\n return loss", "def adjust_learning_rate(optimizer, gamma, step):\n global lr\n lr = lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def update(self,parameters, grads):\n \n L = len(parameters) // 2 # number of layers in the neural network\n #print(L)\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L):\n \n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - self.alpha * grads[\"dW\" + str(l+1)]\n \n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - self.alpha * grads[\"db\" + str(l+1)]\n \n parameters[\"W\" + str(l+1)][np.isnan(parameters[\"W\" + str(l+1)])] = 0\n parameters[\"b\" + str(l+1)][np.isnan(parameters[\"b\" + str(l+1)])] = 0\n \n return parameters", "def step(self):\n\n self.compute_lr()\n\n self.optimizer.param_groups[self.param_group]['lr'] = self.lr\n self.optimizer.param_groups[self.param_group]['momentum'] = self.momentum", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n p.data.add_(-group['lr'], d_p)\n return loss" ]
[ "0.7796381", "0.7476913", "0.7210945", "0.71824896", "0.7161772", "0.7142518", "0.7097461", "0.7039974", "0.7000394", "0.6955703", "0.69353473", "0.6932707", "0.6929035", "0.6923562", "0.68890387", "0.68879765", "0.68699014", "0.6863703", "0.6860742", "0.68581945", "0.684478", "0.68380547", "0.68339", "0.68319196", "0.68188936", "0.68072975", "0.6793701", "0.6790944", "0.67859733", "0.6785722", "0.6780894", "0.6779312", "0.6757562", "0.6737812", "0.67294323", "0.6727152", "0.6715475", "0.67109734", "0.6710723", "0.67084694", "0.6701168", "0.6689414", "0.6684758", "0.66795117", "0.667467", "0.66603804", "0.6651187", "0.6642249", "0.66396034", "0.6638506", "0.66362923", "0.6635234", "0.66334194", "0.6609519", "0.66001713", "0.6593352", "0.6581442", "0.65781057", "0.6574087", "0.6571389", "0.6569894", "0.65593445", "0.65575624", "0.65277946", "0.65114063", "0.6504854", "0.65014654", "0.6498532", "0.6494579", "0.6485647", "0.64717925", "0.646834", "0.6462472", "0.64438576", "0.64414716", "0.6438703", "0.64338213", "0.64280605", "0.64182824", "0.64156765", "0.6406691", "0.64063114", "0.6403674", "0.6393421", "0.63882786", "0.6383887", "0.638135", "0.63786554", "0.6376511", "0.6365432", "0.63595307", "0.6359417", "0.63558775", "0.63504255", "0.63403404", "0.6335943", "0.6334777", "0.63344187", "0.63342017", "0.63327783", "0.63309956" ]
0.0
-1
Returns an encoded sequence from the contents of msg
def encode(msg): #Corner cases: if msg == '': return '' # the empty string yields an empty string if not isinstance(msg, str): return '' # What to do on non-strings (isinstance also allows for subclasses) if msg == None: return '' # If None (void) is passed in res = [] # Store RLE results in dict old = msg[0] # Begin with the first char in the message i = 0 for c in msg: if c == old: # Another observation of the same char, increase run length by 1 i += 1 else: # A different char # Store results up to now res.append(f'{i}{old}') # Prepare for next round old = c i = 1 res.append(f'{i}{old}') # Return the concatenation of all observed run lengths return ''.join(res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(rosMsg): #@NoSelf", "def encode(self, seq):", "def _to_cpp(self, msg):\n buf = BytesIO()\n msg.serialize(buf)\n value = buf.getvalue()\n return value", "def chunkify(msg):\n return [\"%s %s\" % (i, msg[i*158 : (i+1)*158]) for i in range(len(msg)/158 + 1)]", "def encryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n encoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code+key\n new = chr(change)\n string += new\n key += key_increment\n \n encoded = ''.join(string)\n return ('Encoded Message:\\t' + encoded)", "def get_encoded_msg():\n print(\"Enter text you would like to decode:\\n\")\n e_msg = input(\">\")\n return e_msg", "def bytes(self):\n return e(self._seq)", "def caesar_str(m, msg, modulo, start):\n global c\n\n l = len(msg)\n\n for i in range(0, len(c)):\n c[i] = 0\n\n for i in range(0, l):\n c[i] = caesar(m, msg[i], i_rand_a(), modulo, start)\n\n return bytes(c)", "def __compose(self, msg):\n header = b'\\xFB\\xBF'\n end = b'\\xED'\n # Length is sum of header(2), length, check + msg bytes\n length = bytes([4 + len(msg)])\n # Check is sum of length + msg (length+(cmd+params)), with modulus\n # to fit into a single byte\n check_list = bytearray(length)\n check_list.extend(msg)\n check = bytes([sum(check_list) % 256])\n return header + length + msg + check + end", "def encode(self):\n self.preprocess_msg()\n self._find_e()\n\n self.__encoded_msg = self._calculate(self.e)", "def decode_sequence(self, sequence=list) -> str:\n try:\n out = []\n for word in sequence:\n out.append(self.decode(word))\n return(out)\n except Exception as error:\n print(f\"Error: self.decode_sequence({sequence}) -> {error}\")", "def decryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n decoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code-key\n new = chr(change)\n string += new\n key += key_increment\n decoded = ''.join(string)\n return ('Decoded Message:\\t' + decoded)", "def MakeSeq(self,content):\n return self.register(Seq(content,reg=self))", "def decode(msg, mov):\n new = \"\"\n minus = [chr(x) for x in range(97, 123)]\n mayus = [chr(x) for x in range(65, 91)]\n corres = {}\n mv = mov % 26\n for i in range(0, 26):\n if i - mov < 26:\n corres[minus[i]] = minus[(i - mv) % 26]\n corres[mayus[i]] = mayus[(i - mv) % 26]\n else:\n corres[minus[i]] = minus[(i - mv - 26) % 26]\n corres[mayus[i]] = mayus[(i - mv - 26) % 26]\n for l in msg:\n if l in mayus + minus:\n new += corres[l]\n else:\n new += l\n return new", "def _decode_text(self):\n\n print(f\"Hex decode; received message is {self.message}\")\n return bytes.fromhex(self.message).decode('utf-8')", "def rendermsg(self,msg):\n return ' '.join(['%02x'%ord(x) for x in msg])", "def encode(msg: str) -> str:\n\n msg_bytes = msg.encode(\"ascii\")\n b64_bytes = base64.b64encode(msg_bytes)\n return b64_bytes.decode(\"ascii\")", "def decode(seq):\n\n # Handle some corner cases\n if (not seq) or (not isinstance(seq, str)):\n return '' # Return empty string on non-strings and all non-true values (empty string, None, 0, ...)\n\n # Use regex to match patterns, t is then a list of tuples (if any patterns found)\n # '2k3b' -> [('2','k'), ('3','b')] ...notice that integers are still string-formatted\n t = re.findall(r'(\\d)(\\D)', seq)\n\n # Return if empty\n if not t:\n return ''\n\n # Use a list comprehension to work on the tuples... 
Convert integers to int\n # [('2','k'), ('3','b')] -> ['k'*2 , 'b'*3] -> ['kk', 'bbb']\n msg = [c*int(i) for i,c in t]\n\n # Concatenate without separators, msg is now a string\n msg = ''.join(msg)\n\n return msg", "def parse_message(message):\n temp = \"\"\n for i in message:\n if i == bitarray('10100011'):\n temp += 'ESC' + ' '\n elif i == bitarray('01111110'):\n temp += 'FLAG' + ' '\n else:\n temp += i.tobytes().decode('ascii') + ' '\n return temp.strip()", "def decode_message(self, raw):\n return raw.decode('utf-8')", "def decode_affine(msg, a, b):\n #Inverse of the modulo\n m = find_coprime(a)\n \n decoded_message = [ RVALUES[(m * (VALUES[i] - b)) % 26] for i in msg ]\n \n return ''.join(decoded_message)", "def decode(\n self,\n whole_msg: bytes,\n credentials: Credentials,\n ) -> PDU:\n\n security_model_id = 2\n if self.security_model is None:\n self.security_model = create_sm(security_model_id)\n\n decoded, _ = decode(whole_msg, enforce_type=Sequence)\n\n msg = self.security_model.process_incoming_message(decoded, credentials)\n return msg", "def encode_affine(msg, a, b):\n \n #Code to numbers\n encoded_message = [ RVALUES[(a * VALUES[i] + b) % 26] for i in msg ]\n \n return ''.join(encoded_message)", "async def get_msg(self):\n try:\n # 2^8 bytes at a time. I just like it, no special reason\n data = await self.reader.read(256)\n msg = data.decode()\n addr = writer.get_extra_info(\"peername\")\n logging.info(\"Received %s from %s\", (msg, addr))\n\n except Exception as e:\n logging.error(\"Command could not be decoded; %s\", e)\n\n return msg", "def _escape(msg):\n reserved = bytearray('\\x7E\\x7D\\x11\\x13'.encode())\n escaped = bytearray()\n escaped.append(msg[0])\n\n for byte in msg[1:]:\n\n if byte in reserved:\n escaped.append(0x7D)\n escaped.append(byte ^ 0x20)\n else:\n escaped.append(byte)\n\n return escaped", "def _encode_text(self):\n\n print(f\"Hex encode; received message is {self.message}\")\n return self.message.encode(\"utf-8\").hex()", "def process_message(message):\r\n message = gensim.utils.to_unicode(message, 'latin1').strip()\r\n blocks = message.split(u'\\n\\n')\r\n # skip email headers (first block) and footer (last block)\r\n content = u'\\n\\n'.join(blocks[1:])\r\n return content", "def decode_text():\n print(f\"{YELLOW}[{MIDDLE_DOT}]{RESET} Enter message to decode: \", end=\"\")\n message = input()\n extract_encoded_message = message.split(LEFT_TO_RIGHT_MARK)[1]\n message = extract_encoded_message\n extract_encoded_message = message.split(RIGHT_TO_LEFT_MARK)[0]\n encoded = ''\n decoded = ''\n\n for message_char in message:\n if message_char in zero_space_symbols:\n encoded = encoded + str(zero_space_symbols.index(message_char))\n\n cur_encoded_char = ''\n\n for index, encoded_char in enumerate(encoded):\n cur_encoded_char = cur_encoded_char + encoded_char\n if index > 0 and (index + 1) % padding == 0:\n decoded = decoded + chr(int(cur_encoded_char, len(zero_space_symbols)))\n cur_encoded_char = ''\n\n return decoded", "def get_message():\n msg = str(input('-- Input the message: '))\n msg = msg.split()\n x = []\n for i in msg:\n if i == 'ESC':\n x.append('10100011')\n x.append('10100011')\n elif i == 'FLAG':\n x.append('10100011')\n x.append('01111110')\n else:\n try:\n x.append(format(ord(i),'08b'))\n except(TypeError):\n print(Exception)\n print(\"Entered non char value in message\")\n return None\n return x", "def rot13( self, mess, args):\n return args.encode('rot13')", "def from_msg(cls, msg):\n if cls._debug:\n log.debug('msg=%s', msg)\n key, seq_s, 
uuid, prop_s, body = msg\n key = key if key else None\n seq = struct.unpack('!q', seq_s)[0]\n body = body if body else None\n if body:\n body = pipeline.load(body)\n #body = json.loads(body_s)\n #prop = json.loads(prop_s)\n prop = pipeline.load(prop_s)\n return cls(seq, uuid=uuid, key=key, properties=prop, body=body)", "def decode(self, msg):\n if len(msg) < 2:\n raise ValueError(\"Message is too short - can't fit a preamble\")\n preamble = msg[:2]\n \n (x,) = struct.unpack(\"<H\", preamble)\n \n ID = (x & self.ID_MASK) >> 4\n LEN = x & self.LEN_MASK\n\n if LEN < 0 or LEN > 8:\n raise ValueError(\"Invalid CAN payload length - %d bytes not in [0,8] bytes\" % LEN)\n\n if LEN != len(msg[2:]):\n raise ValueError(\"Length from preamble %d mismatches actual length %d in packet w/id %#x\" %\n (LEN, len(msg[2:]), ID))\n\n TIME = datetime.datetime.utcnow()\n \n if ID in self.descriptors:\n desc = self.descriptors[ID]\n if \"format\" not in desc:\n raise ValueError(\"No format specified for %#x:%s\" % (ID, desc[\"name\"]))\n if LEN != struct.calcsize(\"<\" + str(desc[\"format\"])):\n raise ValueError(\"Error in decoding message id=%#x name=%s - length field %d mismatches descriptor %d\"\n % (ID, desc[\"name\"], LEN, struct.calcsize(\"<\" + str(desc[\"format\"]))))\n\n DATA = struct.unpack(\"<\" + str(desc[\"format\"]), msg[2:2+LEN])\n \n return (TIME, ID, desc, DATA)\n else:\n raise ValueError(\"Unknown message id=%#x, len=%d, data=%r\" % (ID, LEN, msg[2:]))", "def transform_seq(seq):\n # TODO add character checking based on ASCII code\n return \"\".join(\"\" if aa in msa_characters else aa for aa in seq)", "def get_payload(raw_msg):\n\n return raw_msg[14:-6], raw_msg[-5]", "def decode_syn(msg):\n\n cut = msg[4:] # Omit the first 4 chars ('SYN;')\n spl = cut.split(';')\n prime = int(spl[0])\n base = int(spl[1])\n a_public = int(spl[2])\n return prime, base, a_public", "def decode(self, seq: Sequence[MorseEvent]) -> str:\n out = MorseDecoder.OUTPUT_INVALID\n\n # truncate input to max length\n seq = seq[:MorseDecoder.LEN_MAX]\n\n for cand in self._seq_all[len(seq) - 1]:\n if cand.seq == seq:\n out = cand.output\n break\n return out", "def unescape(msg):\n skip = False\n unescaped = bytearray()\n\n for i in range(len(msg)):\n\n if not skip and msg[i] is 0x7D:\n\n if not (i + 1) >= len(msg):\n unescaped.append(msg[i + 1] ^ 0x20)\n skip = True\n\n elif not skip:\n unescaped.append(msg[i])\n else:\n skip = False\n\n return unescaped", "def prettyDecode(self,seq):\n s = \"\".join(self.decode(seq))\n s = s.replace(\"_EOS\", \"\" )\n s = s.replace(\"_PAD\", \"\" )\n s = s.replace(\"_\", \" \" )\n return s", "def msg(self):\r\n if self._uris:\r\n raise AssemblerError('Message still contains missing references.')\r\n\r\n return self._msg", "def to_string(msg):\n if type(msg) is bytes:\n msg = str(msg)\n msg = msg[2:]\n return msg[:-1]\n else:\n return msg", "def encode(self, message):\n return message.encode()", "def load_and_convert_msg(txt):\n bin_code = []\n with open(txt) as msg:\n for line in msg:\n for c in line:\n bin_code.append(dec_to_bin(ord(c)))\n return bin_code", "def decode(self, s):", "def decode(self, s):", "def get_message_content(self):\n body = self.doc.find(\n \".//{http://salmon-protocol.org/ns/magic-env}data\").text\n\n body = urlsafe_b64decode(body.encode(\"ascii\"))\n\n logger.debug(\"diaspora.protocol.get_message_content: %s\", body)\n return body", "def _transform_message(self, message):\n serialized = ev_envelope.serialize_envelope(message)\n return 
encodeutils.safe_encode(serialized, 'utf-8')", "def decode_ack(msg):\n\n cut = msg[4:] # Omit the first 4 chars ('ACK;')\n return int(cut)", "def assemble_from_iterable(cls, messages: Sequence[\"NMEAMessage\"]) -> \"NMEAMessage\":\n raw = b''\n data = b''\n bit_array = bitarray()\n\n for msg in messages:\n raw += msg.raw\n data += msg.data\n bit_array += msg.bit_array\n\n messages[0].raw = raw\n messages[0].data = data\n messages[0].bit_array = bit_array\n return messages[0]", "def _read_message(self):\n header = self._read_amt(9)\n msg_size = struct.unpack_from(\">q\", header, 1)[0]\n return header + self._read_amt(msg_size - 9)", "def Sign(self, msg):\n emsa_encoded = util.MakeEmsaMessage(msg, self.size)\n return util.BigIntToBytes(self.key.sign(emsa_encoded, None)[0])", "def text_from_bits(bits, encoding='utf-8', errors='surrogatepass'):\n if bits.__len__() != 0:\n int_msg = int(bits, 2)\n return int_msg.to_bytes((int_msg.bit_length() + 7) // 8, 'big').decode(encoding, errors) or '\\0'\n else:\n return \"No valid message found\"", "def __msgtolist(self) -> List[str]:\n return self.msg.splitlines()", "def DecodeCodedMessage(codedmessage):\n message = CODE.GetMessage(codedmessage)\n return message", "def decode_message(self, key):\n\n decoded_message = ''\n for char in self.message:\n if char.isalpha():\n decoded_char = self.convert_char(char, key)\n decoded_message = decoded_message + decoded_char\n else:\n decoded_message = decoded_message + char\n return decoded_message", "def format_msg(msg):\n if type(msg) == str:\n msg = msg.encode()\n header = str(len(msg))\n header = header.zfill(HEADER_SIZE)\n return header.encode(), msg", "def decode_replay_message_events(contents):\n decoder = BitPackedDecoder(contents, typeinfos)\n for event in _decode_event_stream(decoder,\n message_eventid_typeid,\n message_event_types,\n decode_user_id=True):\n yield event", "def getMsg(self):\n return self._buf", "def get_sequence_string(seq):\n if type(seq) == Bio.SeqRecord:\n seqstr = seq.seq.tostring()\n elif type(seq) == Bio.Seq.Seq:\n seqstr = seq.tostring()\n else:\n seqstr = seq\n return seqstr", "def serialize(msg) -> str:\n try:\n return json.dumps(msg, separators=(',', ':'))\n except json.JSONDecodeError as err:\n return err.msg", "def write(self, msg):\n # FIXME: do we have to check the size of msg and split output? 
\n return self.inout.send(Mtcpfns.pack_msg(msg))", "def decode(self, encoded):", "def _create_message(self, msg):\n head = msg[\"head\"]\n body = msg[\"body\"]\n body = body.format(**self.data)\n length = len(body)\n head = head.format(length=length, **self.data)\n return head + body", "def doDecode(self):\n raise CipherError(\"override this funct and return the decoded msg\")", "def Sign(self, msg):\n emsa_encoded = util.MakeEmsaMessage(msg, self.size)\n bigint_bytes = util.TrimBytes(\n util.BigIntToBytes(self.key.sign(emsa_encoded, None)[0]))\n return util.PadBytes(bigint_bytes,\n (self.size // 8) - len(bigint_bytes))", "def get_msg_by_content(self, content):\n msg_data = self.database.search(self.tname, MsgWithTag.get_msg_key(), content)\n if len(msg_data) != 0:\n return (msg_data[0][0], self.data_to_msg(msg_data[0]))\n return None", "def to_string(self):\n return self.sequence", "def encrypt(self, msg, a):\n if type(msg) != bytes:\n raise InvalidMessageType(\n f\"Expected msg with bytes type, but got {type(msg)}\")\n\n x = bitarray() ; x.frombytes(msg)\n # Transform message space: {0,1} -> {-1,1}\n msg_arr = [1 if b else -1 for b in x] \n return [self._encrypt_bit(b, a) for b in msg_arr]", "def parse_message(msg):\n idx = 8\n tag, nickLen = struct.unpack(\"<LL\", msg[:idx])\n if VERSION != (tag>>16):\n raise Exception(\"Wrong version\")\n sender_nickname = msg[idx:idx+nickLen]\n idx += nickLen\n \n length = struct.unpack(\"<L\", msg[idx:idx+4])[0]\n idx += 4\n sender_pubkey = msg[idx:idx+length]\n idx += length\n\n length = struct.unpack(\"<L\", msg[idx:idx+4])[0]\n idx += 4\n dest_pubkey = msg[idx:idx+length]\n idx += length\n\n length = struct.unpack(\"<L\", msg[idx:idx+4])[0]\n idx += 4\n nonce = msg[idx:idx+length]\n idx += length\n\n length = struct.unpack(\"<L\", msg[idx:idx+4])[0]\n idx += 4\n cipher = msg[idx:idx+length]\n idx += length\n \n return sender_nickname, sender_pubkey, dest_pubkey, nonce, cipher", "def encode_message(self, message):\n\n self.validate_machine_config()\n self.validate_message(message)\n encoded_message = ''\n\n for char in message:\n encoded_message += self.encode_char(char)\n\n return encoded_message", "async def extract_data_from_msg(msg):\n\n body = msg.get_body(('html', 'plain',))\n\n msg_out = {\n 'status': 'delivered',\n 'subject': msg['Subject'],\n 'received': datetime.datetime.now().isoformat(),\n 'from': msg['From'].addresses[0],\n 'recipients': list(msg['To'].addresses),\n 'original-to': msg['X-Original-To'],\n 'delivered-to': msg['Delivered-To'],\n 'dkim-signature': msg['DKIM-Signature'],\n 'message-id': msg['Message-ID'],\n 'domain-signature': msg['DomainKey-Signature'],\n 'date': msg['Date'].datetime,\n 'return': msg['Return-Path'] or msg['Reply-To'],\n 'in-thread': False,\n 'body-type': body.get_content_type(),\n 'body-charset': body.get_content_charset(),\n 'body': body.get_content(),\n 'attachments': []\n }\n\n for ind, att in enumerate(msg.iter_attachments()):\n msg_out['attachments'].append({\n 'index': ind,\n 'type': att.get_content_type(),\n 'filename': att.get_filename()\n })\n\n if msg['Thread-Topic']:\n msg_out['in_thread'] = True\n msg_out['thread-topic'] = msg['Thread-Topic']\n msg_out['thread-index'] = msg['Thread-index']\n\n return msg_out", "def serialize_message(self) -> bytes:\n return self.compile_message().serialize()", "def _encode_message(cls, message):\n if message.magic == 0:\n msg = b''.join([\n struct.pack('>BB', message.magic, message.attributes),\n write_int_string(message.key),\n 
write_int_string(message.value)\n ])\n crc = crc32(msg)\n msg = struct.pack('>i%ds' % len(msg), crc, msg)\n else:\n raise ProtocolError(\"Unexpected magic number: %d\" % message.magic)\n return msg", "def send(self, msg):\n encoded = [(i + '\\r').encode('ascii') for i in msg]\n for i in encoded:\n self.s.write(i)", "def parse_bytes_stream_from_message(msg: bytes,\n length_bytes: int,\n code_bytes: int\n ) -> Dict:\n\n code = int.from_bytes(msg[length_bytes:\n length_bytes + code_bytes],\n byteorder)\n data = msg[length_bytes + code_bytes:]\n\n return {\"code\": code,\n \"data\": data}", "def decode(encoded_message: str, rails: int) -> str:\n message: str = ''\n decoded: list = get_rails(encoded_message, rails)\n\n i = 0\n for row, row_decoded in enumerate(decoded):\n for c, col_decoded in enumerate(row_decoded):\n if col_decoded:\n decoded[row][c] = encoded_message[i]\n i += 1\n\n for c in range(len(decoded[0])):\n for row in range(len(decoded)):\n if decoded[row][c]:\n message += decoded[row][c]\n\n return message", "def seqOfAsciiCode(prova):\n\n # lista in char\n #[ ord (c) for c in prova]\n # la codifica della stringa in hex\n #\"stringa\".decode('hex')\n value = \"\"\n for i in range(len(prova)):\n if (i != len(prova) - 1):\n value = value + str(ord(prova[i])) + \",\"\n else:\n value = value + str(ord(prova[i]))\n return value", "def process_message(self, msg, src):", "def get_sequence(pose, res_nums=None):\n # type: (Pose, list) -> str\n # if no res_nums were given, return the pose's sequence\n if res_nums is None:\n return str(pose.sequence())\n # else, return the sequence of the specified res_nums\n else:\n return str(''.join([pose.residue(r).name1() for r in res_nums]))", "def decode(b64_msg: str) -> str:\n\n b64_bytes = b64_msg.encode(\"ascii\")\n b64_bytes = base64.b64decode(b64_bytes)\n return b64_bytes.decode(\"ascii\")", "def _parse_message(msg):\n lines, body = _split_lines(msg)\n # The first line is the start line.\n start_line = lines[0]\n # Remaining lines are the header.\n header = _parse_header(lines[1 :])\n return start_line, header, body", "def __message_content__(self) -> MessageContent:", "def prepare_msg(raw_message):\n\n raw_message = str(raw_message)\n\n raw_message = raw_message.lower()\n raw_message = raw_message.replace(\"bismarkb1996\", \"\")\n raw_message = raw_message.replace(\"id336383265\", \"\")\n raw_message = re.sub('[^а-яА-Яa-zA-Z0-9\\\\s\\\\-]+', '', raw_message)\n\n split_message = raw_message.split(\" \")\n logger.debug(\"Split message: \" + str(split_message))\n\n message = []\n for msg in [x.split(\"-\") for x in split_message]:\n for i in msg:\n if i != \"\":\n message.append(i)\n\n return message", "def create_seq_record(self, s):\n gene_code = s['gene_code']\n length = self.gene_codes_metadata[gene_code]['length']\n sequence = s['sequences']\n length_difference = length - len(sequence)\n\n sequence += '?' 
* length_difference\n return sequence", "def encode_message(self, message):\n return message.encode('utf-8')", "def rot13(message):\n\n return ''.join(\n [chr(ord(x) + 13 - 26 * ((ord(x) + 12) // 90))\n if x.isupper()\n else\n chr(ord(x) + 13 - 26 * ((ord(x) + 12) // 122))\n if x.islower()\n else x\n for x in message])", "def encode(self):\n if self.ciphered:\n raise CipherError(\"already encoded.\")\n try:\n self.result = self.doEncode(self.msg,self.shift)\n except Exception as e:\n raise CipherError(\"encoding failure: {}.\".format(e))\n self.ciphered = True\n return self.result", "def _decode(self, message):\n raise NotImplementedError(\"_decode needs to be implemented in {} subclass\".format(type(self).__name__))", "def translate(self) -> Seq:\n AA = \"\".join(\n self.codons[self.sequence[i : i + 3]]\n for i in range(0, len(self.sequence), 3)\n if self.codons[self.sequence[i : i + 3]] != \"Stop\"\n )\n return Seq(AA, self.id)", "def decode(self):\n if self.ciphered:\n msg = self.result \n self.result = ''\n else:\n msg = self.msg\n try:\n self.result = self.doDecode(msg,self.shift)\n except Exception as e:\n raise CipherError(\"decoding failure {}.\".format(e))\n self.ciphered = False\n return self.result", "def encode_message_body(self):\n message_body = array.array('B')\n\n #TODO: enable encoding of MMSs without SMIL file\n ########## MMS body: header ##########\n # Parts: SMIL file + <number of data elements in each slide>\n num_entries = 1\n for page in self._mms_message._pages:\n num_entries += page.number_of_parts()\n\n for data_part in self._mms_message._data_parts:\n num_entries += 1\n\n message_body.extend(self.encode_uint_var(num_entries))\n\n ########## MMS body: entries ##########\n # For every data \"part\", we have to add the following sequence:\n # <length of content-type + other possible headers>,\n # <length of data>,\n # <content-type + other possible headers>,\n # <data>.\n\n # Gather the data parts, adding the MMS message's SMIL file\n smil_part = message.DataPart()\n smil = self._mms_message.smil()\n smil_part.set_data(smil, 'application/smil')\n #TODO: make this dynamic....\n smil_part.headers['Content-ID'] = '<0000>'\n parts = [smil_part]\n for slide in self._mms_message._pages:\n for part_tuple in (slide.image, slide.audio, slide.text):\n if part_tuple is not None:\n parts.append(part_tuple[0])\n\n for part in parts:\n name, val_type = part.headers['Content-Type']\n part_content_type = self.encode_content_type_value(name, val_type)\n\n encoded_part_headers = []\n for hdr in part.headers:\n if hdr == 'Content-Type':\n continue\n encoded_part_headers.extend(\n wsp_pdu.Encoder.encode_header(hdr, part.headers[hdr]))\n\n # HeadersLen entry (length of the ContentType and\n # Headers fields combined)\n headers_len = len(part_content_type) + len(encoded_part_headers)\n message_body.extend(self.encode_uint_var(headers_len))\n # DataLen entry (length of the Data field)\n message_body.extend(self.encode_uint_var(len(part)))\n # ContentType entry\n message_body.extend(part_content_type)\n # Headers\n message_body.extend(encoded_part_headers)\n # Data (note: we do not null-terminate this)\n for char in part.data:\n message_body.append(ord(char))\n\n return message_body", "def demo(self, message:str) -> None:\n print(\"LEAP text encoding: {}\".format(message))\n for byte in message:\n ascii_byte = bytes(byte, 'ascii')\n print(\"{} -> {}\".format(byte, ord(byte)))\n self.ser.write(ascii_byte)\n # time.sleep(0.01)", "def de_base64(msg):\n try:\n msg_ascii = 
msg.encode('ascii')\n msg_bytes = base64.b64decode(msg_ascii)\n msg_decoded = msg_bytes.decode('ascii')\n return msg_decoded\n except:\n print('Invalid base64-encoded string')", "def get_message():\n\tincoming_message = conn.recv(1024)\n\tincoming_message = incoming_message.decode()\n\treturn incoming_message", "def encrypt_message(msg):\n with urllib.request.urlopen(format_url(main_url+\"encrypt.php\",msg)) as f:\n encryptedmessage = f.read().decode('utf-8',\"strict\")\n return encryptedmessage", "def decrypt(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.decrypt(msg).data", "def _parse_msg(self, msg):\n try:\n self.received_msg += msg.decode()\n except:\n self.log.warning(\"invalid parse frame '%s'\" % msg)\n\n while True:\n pos = self.received_msg.find('\\r')\n if pos == -1: # no full msg\n break\n m = self.received_msg[:pos].strip()\n if not len(m):\n break\n self.platform.process_received_message(m)\n self.received_msg = self.received_msg[pos + 1:]", "def start(self, msg):\n self.jsock.encode(msg)", "def time_encode(self):\n for ii in range(100):\n for fragment in self.msg.encode_msg(1, 16382):\n pass", "def _transform(self, original, coder):\n msg = list(original)\n for k in range(len(msg)):\n if 0x40 < ord(msg[k]) < 0x5a:\n msg[k] = coder[msg[k]]\n return u\"\".join(msg)", "def msgtoDec(msg):\n if(isinstance(msg, str)):\n return int.from_bytes(msg.encode(),byteorder=\"little\")\n else:\n return int.from_bytes(msg,byteorder=\"little\")" ]
[ "0.6726", "0.64823467", "0.6431121", "0.6076899", "0.60573506", "0.6021212", "0.6009153", "0.59417653", "0.5894444", "0.58700716", "0.5841319", "0.5840961", "0.5782641", "0.5758987", "0.572157", "0.57201093", "0.5666906", "0.56631124", "0.56581855", "0.5646804", "0.56445134", "0.56019884", "0.55959207", "0.55898273", "0.5574324", "0.5571218", "0.55646056", "0.5555611", "0.55460745", "0.5528234", "0.5518314", "0.5516346", "0.5493121", "0.547081", "0.54604036", "0.5457865", "0.5455774", "0.5453915", "0.54475415", "0.5438043", "0.5435117", "0.54342616", "0.541627", "0.541627", "0.54003656", "0.5395097", "0.5393648", "0.53880775", "0.5372103", "0.53679025", "0.5348192", "0.53439206", "0.5321325", "0.53012794", "0.52907", "0.52739286", "0.5273136", "0.52680814", "0.5264387", "0.5250353", "0.5250028", "0.5238342", "0.5236275", "0.5233414", "0.5231856", "0.52258664", "0.5223026", "0.52185434", "0.52173805", "0.52113765", "0.52060485", "0.52058244", "0.5201129", "0.51902145", "0.5184819", "0.51824677", "0.51814336", "0.51678526", "0.5165227", "0.5162154", "0.5161897", "0.5158092", "0.5156", "0.51534945", "0.5147561", "0.51420355", "0.5135405", "0.5129108", "0.51188666", "0.5107956", "0.51064205", "0.5087968", "0.5086532", "0.5079269", "0.5075072", "0.50645304", "0.50643635", "0.5063324", "0.5063013", "0.50607663" ]
0.5922205
8
Decodes a run-length encoded sequence such as '2k3b' into 'kkbbb'
def decode(seq): # Handle some corner cases if (not seq) or (not isinstance(seq, str)): return '' # Return empty string on non-strings and all non-true values (empty string, None, 0, ...) # Use regex to match patterns, t is then a list of tuples (if any patterns found) # '2k3b' -> [('2','k'), ('3','b')] ...notice that integers are still string-formatted t = re.findall(r'(\d)(\D)', seq) # Return if empty if not t: return '' # Use a list comprehension to work on the tuples... Convert integers to int # [('2','k'), ('3','b')] -> ['k'*2 , 'b'*3] -> ['kk', 'bbb'] msg = [c*int(i) for i,c in t] # Concatenate without separators, msg is now a string msg = ''.join(msg) return msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(k, key_length):\n key = k[:key_length]\n val_length, ber_length = decode_ber(k[key_length:])\n value = k[key_length + ber_length : key_length + ber_length + val_length]\n return key, value", "def decode(b):\n\n if b.startswith(\"0z\"):\n b = b[2:]\n\n l, i, v = len(b), 0, 0\n for x in b:\n v += _value(x) * (BASE ** (l - (i + 1)))\n i += 1\n\n return v", "def decode(s):\n start = 0\n multiplier = 1\n for char in s[::-1]:\n start += multiplier * LETTERS.index(char)\n multiplier = multiplier * 58\n return start", "def decode(self, s):", "def decode(self, s):", "def decode(s):\n decoded = 0\n multi = 1\n s = s[::-1]\n for char in s:\n decoded += multi * alphabet.index(char)\n multi = multi * base_count\n \n return decoded", "def decode(encoded_key, encoded_string, size):\n\t\n\tdecoded_string = ''\n\ti = 0\n\tfor i in range(0, size):\n\t\tdecoded_string = decoded_string + chr(ord(encoded_string[i]) ^ ord(encoded_key[i % len(encoded_key)]))\n\t\n\treturn str(decoded_string)", "def decompress_seq(x: int, length=16):\n bits = 64\n x = np.uint64(x)\n assert length <= (bits / 2 - 1)\n if x & (1 << (bits - 1)):\n return \"N\" * length\n result = bytearray(length)\n for i in range(length):\n result[(length - 1) - i] = bytearray(NUCS[x & np.uint64(0b11)].encode())[0]\n x = x >> np.uint64(2)\n return result.decode()", "def decode(a):\n return decode(a)", "def decode(str):\n s6 = re.sub('6','\\n',str)\n s5 = re.sub('5','44',s6)\n s4 = re.sub('4','33',s5)\n s3 = re.sub('3','22',s4)\n return re.sub('2',' ',s3)", "def decompression(compressed_sequence:str):\r\n decompressed_sequence=\"\"\r\n for character in compressed_sequence:\r\n decompressed_sequence += bin(ord(character))[2:].zfill(8)\r\n return decompressed_sequence", "def decode(self, s):\n i = 0\n strs = []\n while i < len(s):\n l = int(s[i:i+8], 16)\n strs.append(s[i+8:i+8+l])\n i += 8+l\n return strs", "def decode(str):\r\n\tstr = str.translate(decode_translation)\r\n\tresult = 0\r\n\r\n\tfor c in str:\r\n\t\tresult = result * keyspace_len + keyspace.index(c)\r\n\treturn result", "def decode_position(s):\n if '=' in s:\n raise TypeError\n while True:\n try:\n bin = standard_b64decode(s)\n except TypeError, e:\n if str(e) != 'Incorrect padding':\n raise\n s += '='\n else:\n break\n r = twoside_decode(bin)\n if not len(r) == 2 or not len(r[0])==25 or not len(r[1])==25:\n raise bglib.encoding.DecodeError('got bad data: %s '%(s,))\n return r", "def decode(encoded: str) -> int:\n if len(encoded) > 6:\n raise ValueError(\"Parameter too long\")\n\n value: int = 0\n\n for c in encoded:\n value <<= 6\n value += Base64.ord(c)\n value = Base64._int_overflow(value)\n\n return value", "def Decodingfunc(Codebyte):\r\n Decodedint=struct.unpack('b',Codebyte)[0]\r\n N=0 #number of repetitions\r\n L=0 # length of single/multiple sequence\r\n if Decodedint >= 0: #single\r\n N = 1\r\n L = Decodedint+1\r\n else: #multiple\r\n L = -Decodedint//16+1\r\n N = -Decodedint-(L-1)*16+1\r\n #print(\"N =\",N,\" L =\",L)\r\n return (N,L)", "def bin_decode(input, errors='strict'):\n output = \"\"\n assert (len(input) % 8) == 0, \\\n \"Wrong number of bits, %s is not divisible by 8\" % len(input)\n output = ''.join(chr(int(c, 2)) for c in blocks(input, 8))\n return (output, len(input))", "def decode(encoded, alphabet=ALPHABET):\n\n base = len(alphabet)\n decoded = 0\n power = 1\n\n encoded = list(encoded)\n encoded.reverse()\n\n for char in encoded:\n decoded += alphabet.index(char) * power\n power *= base\n\n return decoded", "def _algorithm(self, rut):\n suma = 
0\n multi = 2\n for r in rut[::-1]:\n suma += int(r) * multi\n multi += 1\n if multi == 8:\n multi = 2\n return u'0123456789K0'[11 - suma % 11]", "def decode_var_len_uint8(br):\n if br.read_bits(1):\n nbits = br.read_bits(3)\n if nbits == 0:\n return 1\n return br.read_bits(nbits) + (1 << nbits)\n return 0", "def decodeText(m, bitlen):\n # do not use most significant byte\n bytelen = (bitlen // 8) - 1\n mbytes = bytearray()\n for x in m:\n mbytes += x.to_bytes(bytelen, byteorder='little')\n return mbytes.rstrip(b'\\x00').decode('utf-8')", "def __decodeString(self,ascii):\n second = ascii%256\n first = (ascii-second)/256\n return str(chr(first))+str(chr(second))", "def decode(e):\n return sum([length * [item] for length,item in e],[])", "def decode(self, encoded):", "def numDecodings(self, s):\n if not s or s[0] == '0':return 0\n s1,s2 = 1,1\n for m in xrange(1,len(s)):\n if s[m] == '0':s2 = 0\n if s[m-1] == '1' or (s[m-1] == '2' and s[m] <= '6'):\n s2 += s1\n s1 = s2 - s1\n else:\n s1 = s2\n if s2 == 0:return 0\n return s2", "def b58decode(v, length):\n\tlong_value = 0L\n\tfor (i, c) in enumerate(v[::-1]):\n\t\tlong_value += __b58chars.find(c) * (__b58base**i)\n\tresult = ''\n\twhile long_value >= 256:\n\t\tdiv, mod = divmod(long_value, 256)\n\t\tresult = chr(mod) + result\n\t\tlong_value = div\n\tresult = chr(long_value) + result\n\tnPad = 0\n\tfor c in v:\n\t\tif c == __b58chars[0]: nPad += 1\n\t\telse: break\n\tresult = chr(0)*nPad + result\n\tif length is not None and len(result) != length:\n\t\treturn None\n\treturn result", "def get_decoded_value(self, encoded_s):\n stack = Stack()\n node = self.get_root().get_value()[1]\n state = State(node)\n stack.push(state)\n count = 0\n decoded_s = \"\"\n \n while node and count < len(encoded_s):\n if encoded_s[count] == '0':\n if type(node) == str:\n decoded_s += node\n node = self.get_root().get_value()[1]\n if node.has_left_child(): \n node = node.get_left_child()[1]\n # Handle case for the last character found from the encoded string\n if count == len(encoded_s) - 1:\n decoded_s += node\n count += 1\n else: # Go to right node\n if type(node) == str:\n decoded_s += node\n node = self.get_root().get_value()[1]\n if node.has_right_child(): \n node = node.get_right_child()[1]\n # Handle case for the last character found from the encoded string\n if count == len(encoded_s) - 1:\n decoded_s += node\n count += 1\n\n return decoded_s", "def test_decoder(self):\n from sosbeacon.utils import number_decode\n\n encoded = 'b6'\n number = number_decode(encoded)\n self.assertEqual(number, 123)", "def decode(text, password):\r\n\tstep_index = 0\r\n\tdecoded_text = ''\r\n\tfor letter in text:\r\n\t\tdecoded_text += prev_letter(letter, to_int(password[step_index]))\r\n\t\tstep_index += 1\r\n\t\tif step_index > len(password)-1:\r\n\t\t\tstep_index = 0\r\n\treturn decoded_text", "def decode(data): #@NoSelf", "def decode_bytes(s):\n if pandas.isnull(s):\n return s\n\n scales = {\n 'k': 1024,\n }\n if not s.endswith('b'):\n raise Exception(f\"{s} doesn't look like a size\")\n\n scale = 1\n s = s[:-1]\n\n if not s[-1].isdigit():\n scale = scales[s[-1]]\n s = s[:-1]\n\n return int(s) * scale", "def decipher_raw(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n e = [decrypt(u[i], u[i + 1], key) for i in range(len(u))[::2]]\n return b''.join([struct.pack('2I', ee, ef) for ee, ef in e])", "def strQ2B(ustring):\n rstring = \"\"\n for uchar in ustring:\n inside_code = ord(uchar)\n if inside_code 
== 12288: \n inside_code = 32\n elif (inside_code >= 65281 and inside_code <= 65374): \n inside_code -= 65248\n rstring += chr(inside_code)\n return rstring", "def b58decode(v, length):\n long_value = 0\n for (i, c) in enumerate(v[::-1]):\n long_value += b58_chars.find(c) * (b58_base ** i)\n\n result = ''\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result = chr(mod) + result\n long_value = div\n result = chr(long_value) + result\n\n nPad = 0\n for c in v:\n if c == b58_chars[0]:\n nPad += 1\n else:\n break\n\n result = chr(0) * nPad + result\n if length is not None and len(result) != length:\n return None\n\n return result", "def test_handles_one_char(self):\n result = encode_run_length(\"R\")\n self.assertEqual(result, \"1R\")", "def b58decode(v, length):\n long_value = 0L\n for (i, c) in enumerate(v[::-1]):\n long_value += __b58chars.find(c) * (__b58base**i)\n \n result = ''\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result = chr(mod) + result\n long_value = div\n result = chr(long_value) + result\n \n nPad = 0\n for c in v:\n if c == __b58chars[0]: nPad += 1\n else: break\n \n result = chr(0)*nPad + result\n if length is not None and len(result) != length:\n return None\n \n return result", "def b58decode(v, length):\r\n long_value = 0L\r\n for (i, c) in enumerate(v[::-1]):\r\n long_value += __b58chars.find(c) * (__b58base**i)\r\n\r\n result = ''\r\n while long_value >= 256:\r\n div, mod = divmod(long_value, 256)\r\n result = chr(mod) + result\r\n long_value = div\r\n result = chr(long_value) + result\r\n\r\n nPad = 0\r\n for c in v:\r\n if c == __b58chars[0]: nPad += 1\r\n else: break\r\n\r\n result = chr(0)*nPad + result\r\n if length is not None and len(result) != length:\r\n return None\r\n\r\n return result", "def pos_decode(string,N=10000,bits=7,multipleblocks=1):\n B=1<<bits\n offset=0\n output = [] ## create an array of N zeroes\n for n in range( N ):\n output.append(\"0\")\n clist = list(string)\n if (multipleblocks):\n while len(clist)>0:\n c = clist.pop(0) ## delete the first bit from the input\n if( c == \"1\" ) : ## read 'bits' more bits from the list.\n\t\ti = bin_to_dec( clist , bits ) \n\t\toutput[i + offset] = \"1\" \n\t\tpass\n else :\n\t\toffset += B \n pass\n pass\n assert offset>=N ## check that we received the number of blocks expected\n\tpass\n else :\n while len(clist)>0 :\n i = bin_to_dec( clist , bits ) \n\t output[i] = \"1\"\n pass\n pass\n return \"\".join(output)", "def decode(self, s):\n i, str = 0, []\n while i < len(s):\n sharp = s.find(\"#\", i)\n l = int(s[i:sharp])\n str.append(s[sharp + 1:sharp + l + 1])\n i = sharp + l + 1\n return str", "def reverse_byte_order_positive(result):\n bit_string = \"\"\n while len(result) > 0:\n cur_nybble = result[0:2]\n bit_string = cur_nybble + bit_string\n result = result[3:len(result) + 1]\n\n if len(result) != 0:\n bit_string = \" \" + bit_string\n\n return bit_string", "def slice_string(s, _len):\n return long_to_bytes(int(bin(bytes_to_long(s))[2:2+_len], 2))", "def test_decode():\n enig = Enigma(534, 16, 8, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])\n string = \"\"\"-)m>&)IKp[1`Sro$82[@_`TV&`f%}|<]a1R*\\W4IEb6j@+':`R[.(1$vV4rTJ2\n6V?5.;8q r%0p@+[Ir7-?rzIl;nV<4W7,PD[5-?;RE+~vR5-`i}>=z@S \"eJ`8g:S:1ir\nE0=<F0~/;6).\"\"\"\n decoded = \"\"\"Hello, this is a test string. I will follow this with a return\nbringing it onto a new line. I can do this forever, but I won't. 
Just\nfor a while.\"\"\"\n\n enig.setrotsettings([5, 2, 2, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5])\n assert_equal(decoded, enig.decode(string))\n\n startsettings = [4, 6, 0, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5]\n assert_equal(startsettings, enig.getrotsettings())", "def decode_key(as_bytes: typing.List[int]) -> str:\n raise NotImplementedError()", "def _rle_decode_segment(data: bytes) -> bytearray:\n data = bytearray(data)\n result = bytearray()\n pos = 0\n result_extend = result.extend\n\n try:\n while True:\n # header_byte is N + 1\n header_byte = data[pos] + 1\n pos += 1\n if header_byte > 129:\n # Extend by copying the next byte (-N + 1) times\n # however since using uint8 instead of int8 this will be\n # (256 - N + 1) times\n result_extend(data[pos : pos + 1] * (258 - header_byte))\n pos += 1\n elif header_byte < 129:\n # Extend by literally copying the next (N + 1) bytes\n result_extend(data[pos : pos + header_byte])\n pos += header_byte\n\n except IndexError:\n pass\n\n return result", "def decode_data ( data ) :\n cipher = get_cipher( data )\n index = 0\n firstpass = []\n datalen = len( data )\n while index < datalen :\n if index % 2 == 0 :\n firstpass.append( chr( ord( data[ index ] ) - cipher ) )\n else :\n firstpass.append( chr( ord( data[ index ] ) + cipher ) )\n index += 1\n\n firstpass[ 0 ] = data[ 0 ]\n firstpass[ -1 ] = data[ -1 ]\n firstpass[ -2 ] = data[ -2 ]\n decoded_data = ''.join( firstpass )\n return base64.b64decode( decoded_data )", "def decode(self, s):\n res = []\n i, j, length = 0, 0, len(s)\n while i < length:\n if s[j] == ':':\n num = int(s[i:j])\n res.append('' + s[j+1:j+1+num])\n i = j+1+num\n j = j+1+num\n else:\n j+=1\n return res", "def decode(txt):\n size = len(txt)\n i = 0\n txt_result = \"\"\n while i < size:\n\n repetable = 0\n current_char = txt[i]\n if current_char.isdigit():\n repetable = int(current_char) - 1\n for j in range(0, repetable):\n txt_result = txt_result + txt[i - 1]\n i = i + 1\n else:\n txt_result = txt_result + current_char\n i = i + 1\n print(txt_result)\n return txt_result", "def prettyDecode(self,seq):\n s = \"\".join(self.decode(seq))\n s = s.replace(\"_EOS\", \"\" )\n s = s.replace(\"_PAD\", \"\" )\n s = s.replace(\"_\", \" \" )\n return s", "def decode(encoded_word):\n decoded = []\n for i, letter in enumerate(encoded_word):\n if letter.isdigit():\n decoded.append(int(letter) * encoded_word[i + 1])\n elif not encoded_word[i - 1].isdigit():\n decoded.append(letter)\n\n decoded_word = ''.join(decoded)\n return decoded_word", "def retransformation(decompressed_sequence:str, binary_dict:dict, calcul_byte:int):\r\n if calcul_byte != 0:\r\n calcul_byte = (8-calcul_byte)\r\n decompressed_sequence = decompressed_sequence[calcul_byte:]\r\n \"\"\"\r\n Allows you to remove the zeros added at the beginning of a binary \r\n string to make a multiple of 8\r\n \"\"\"\r\n decompress_sequence = \"\"\r\n counter = 0\r\n for position in range(0, len(decompressed_sequence)+1, 1):\r\n if decompressed_sequence[counter:position] in binary_dict.keys():\r\n decompress_sequence += binary_dict[decompressed_sequence[counter:position]]\r\n counter = position\r\n return decompressed_sequence, decompress_sequence", "def decode(string,root):\n ## split the string into a list\n ## then copy the elements of the list one by one.\n answer = []\n clist = list( string )\n ## start from root\n currentnode = root\n for c in clist:\n if ( c=='\\n' ): continue ## special case for newline characters\n assert ( c == '0' )or( c == '1')\n currentnode = 
currentnode[int(c)]\n if isinstance( currentnode , str ) :\n answer.append( currentnode )\n currentnode = root\n pass\n assert (currentnode == root) ## if this is not true then we have run out of characters and are half-way through a codeword\n return answer", "def decode_barcode_8(nt_barcode):\r\n # check proper length\r\n if len(nt_barcode) != 8:\r\n raise ValueError(\"barcode must be 8 nt long.\")\r\n\r\n # check valid characters\r\n if set(list(nt_barcode)).difference(CUR_ENC_FO.keys()):\r\n raise ValueError(\"Only A,T,C,G valid chars.\")\r\n\r\n # decode\r\n decoded = nt_to_cw(CUR_ENC_FO, nt_barcode)\r\n num_errors, sym = calc_syndrome(decoded, 16)\r\n\r\n # convert corrected codeword back to nt sequence\r\n if num_errors == 1:\r\n nt_barcode = unpack_bitstr(CUR_REV_ENC_SI, ''.join(map(str, decoded)))\r\n elif num_errors > 1:\r\n nt_barcode = None\r\n\r\n return nt_barcode, num_errors / 2.0", "def decoder(alphabet):\n base = len(alphabet)\n index = dict((v, k) for k, v in enumerate(alphabet))\n\n def decode(xs):\n try:\n result = 0\n for i, x in enumerate(xs[::-1]):\n result += (base ** i) * index[x]\n return result\n except KeyError:\n raise ValueError(\"%r is not in the alphabet %r\" % (x, alphabet))\n\n return decode", "def decode_svarint(buf, pos):\n output, pos = decode_uvarint(buf, pos)\n # zigzag encode value\n return wire_format.ZigZagDecode(output), pos", "def decode_svarint(buf, pos):\n output, pos = decode_uvarint(buf, pos)\n # zigzag encode value\n return wire_format.ZigZagDecode(output), pos", "def _decode_int(data: BencodedString) -> int:\n data.del_prefix(1)\n end_marker_index = data.bytes.find(END_MARKER)\n\n if end_marker_index > 0:\n result_bytes = data.get_prefix(end_marker_index)\n data.del_prefix(end_marker_index + 1)\n else:\n raise ValueError(\n \"Cannot decode an integer, reached the end of the bencoded \"\n \"string before the end marker was found. 
Most likely the \"\n \"bencoded string is incomplete or incorrect.\"\n )\n\n return int(result_bytes.decode(\"ascii\"))", "def test_decompress_seq_diff_9_char(self):\n b_array = bytearray([0]) + bytearray(b'12345678') \\\n + bytearray([0]) + bytearray(b'9')\n actual = LZ77.decompress(b_array)\n expected = '123456789'\n self.assertEqual(actual, expected)", "def rc4_decode(data, key, decode=base64.b64decode, salt_length=16):\n if decode:\n data = decode(data)\n salt = data[:salt_length]\n return crypt(data[salt_length:], sha1(key + salt).digest())", "def build_decoder(shift):\n ### TODO.", "def _decode(value):\n # TODO add support for strings\n return struct.unpack('<i', value)[0]", "def zbase32_decode(text: str) -> bytes:\n result = bytearray(len(text) * 5 // 8)\n cur_byte = 0\n cur_numbits = 0\n idx = 0\n for character in text:\n value = ZBASE32_ALPHABET_REV[character]\n cur_byte = (cur_byte << 5) | value\n cur_numbits += 5\n if cur_numbits >= 8:\n cur_numbits -= 8\n result[idx] = cur_byte >> cur_numbits\n idx += 1\n cur_byte &= (1 << cur_numbits) - 1\n return bytes(result)", "def _le_decode(value):\n return sum(c << 8*i for i, c in enumerate(value[:4]))", "def strQ2B(uchar):\r\n inside_code = ord(uchar)\r\n if inside_code == 12288:\r\n inside_code = 32\r\n elif 65281 <= inside_code <= 65374:\r\n inside_code -= 65248\r\n return chr(inside_code)", "def decode_ber(ber):\n ber = bytearray(ber)\n length = ber[0]\n bytes_read = 1\n if length > 127:\n bytes_read += length & 127 # Strip off the high bit\n length = 0\n for i in range(1, bytes_read):\n length += ber[i] << (8 * (bytes_read - i - 1))\n return length, bytes_read", "def text_from_bits(bits, encoding=\"utf-8\", errors=\"surrogatepass\"):\n n = int(bits, 2)\n return n.to_bytes((n.bit_length() + 7) // 8, \"big\").decode(encoding, errors) or \"\\0\"", "def decode_ber(ber):\n length = ber[0]\n bytes_read = 1\n if length > 127:\n bytes_read += length & 127 # Strip off the high bit\n length = 0\n for i in range(1, bytes_read):\n length += ber[i] << (8 * (bytes_read - i - 1))\n return length, bytes_read", "def test_decompress_seq_diff_8_char(self):\n b_array = bytearray([0]) + bytearray(b'12345678')\n actual = LZ77.decompress(b_array)\n expected = '12345678'\n self.assertEqual(actual, expected)", "def decode_varint(value):\n return decode_varint_stream(value).next()", "def decode(id_string, alphabet=ALPHABET):\n alphabet_len = len(alphabet) # Cache\n return sum([alphabet.index(char) * pow(alphabet_len, power) for power, char in enumerate(reversed(id_string))])", "def dec2int(r: str) -> int:", "def un_pad(s):\n return s[0:-ord(s[-1])]", "def _decode_bytes(data: BencodedString) -> bytes:\n # Get byte string length\n delimiter_index = data.bytes.find(COLON)\n\n if delimiter_index > 0:\n length_prefix = data.get_prefix(delimiter_index)\n string_length = int(length_prefix.decode(\"ascii\"))\n data.del_prefix(delimiter_index + 1)\n else:\n raise ValueError(\n \"Cannot decode a byte string, it doesn't contain a delimiter. \"\n \"Most likely the bencoded string is incomplete or incorrect.\"\n )\n\n # Get byte string data\n if len(data.bytes) >= string_length:\n result_bytes = data.get_prefix(string_length)\n data.del_prefix(string_length)\n else:\n raise ValueError(\n f\"Cannot decode a byte string (prefix length \"\n f\"- {string_length}, real_length - {len(data.bytes)}. 
\"\n \"Most likely the bencoded string is incomplete or incorrect.\"\n )\n\n return result_bytes", "def decode(s, storage=BIT_STORAGE, alpha=ALPHABET):\n n = [ord(a) for a in s if a != TWUUENC_START and a != TWUUENC_START_ZLIB]\n bs = BitString()\n for a in n:\n for pos,l in enumerate(alpha):\n if a == l:\n bs.append(BitString(uint=pos, length=storage))\n bs.seekbyte(0)\n return bs.readbytes(len(bs)/8).data.rstrip('\\0')", "def decode(self, t, length, raw=False):\n if length.numel() == 1:\n length = length[0]\n assert t.numel() == length, \"text with length: {} does not match declared length: {}\".format(t.numel(), length)\n if raw:\n return ''.join([self.alphabet[i - 1] for i in t])\n else:\n char_list = []\n for i in range(length):\n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):\n char_list.append(self.alphabet[t[i] - 1])\n return ''.join(char_list)\n else:\n # batch mode\n assert t.numel() == length.sum(), \"texts with length: {} does not match declared length: {}\".format(t.numel(), length.sum())\n texts = []\n index = 0\n for i in range(length.numel()):\n l = length[i]\n texts.append(\n self.decode(\n t[index:index + l], torch.IntTensor([l]), raw=raw))\n index += l\n return texts", "def decode(encoded):\n #six degrees of precision in valhalla\n inv = 1.0 / 1e6;\n \n decoded = []\n previous = [0,0]\n i = 0\n #for each byte\n while i < len(encoded):\n #for each coord (lat, lon)\n ll = [0,0]\n for j in [0, 1]:\n shift = 0\n byte = 0x20\n #keep decoding bytes until you have this coord\n while byte >= 0x20:\n byte = ord(encoded[i]) - 63\n i += 1\n ll[j] |= (byte & 0x1f) << shift\n shift += 5\n #get the final value adding the previous offset and remember it for the next\n ll[j] = previous[j] + (~(ll[j] >> 1) if ll[j] & 1 else (ll[j] >> 1))\n previous[j] = ll[j]\n #scale by the precision and chop off long coords also flip the positions so\n #its the far more standard lon,lat instead of lat,lon\n decoded.append([float('%.6f' % (ll[1] * inv)), float('%.6f' % (ll[0] * inv))])\n #hand back the list of coordinates\n return decoded", "def bin2int(r: str) -> int:", "def decipher_raw2(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n stringa = str(b'\\xff\\xd8\\xff').replace('\\'', '')\n for i in range(len(u))[::2]:\n e = [decrypt2(u[i], u[i + 1], key)]\n i = b''.join([struct.pack('2I', ee, ef) for ee, ef in e])\n\n prova = str(i).replace('\\'', '')\n\n #lel = prova.find(stringa)\n\n if prova.find(stringa) != -1:\n print(\"detect format file: JPG\")\n return 0\n else:\n return 1", "def decode_string(self, value):\r\n return value", "def decode_base64(self, s):\n return self.transcode(struct.unpack('!L', base64.b64decode(s + '==', self.extra_chars))[0])", "def decode_encoded_string_value(byte_iter):\n try:\n # First try \"Value-length Char-set Text-string\"\n value_length = wsp_pdu.Decoder.decode_value_length(byte_iter)\n # TODO: add proper support for charsets...\n try:\n charset = wsp_pdu.Decoder.decode_well_known_charset(byte_iter)\n except wsp_pdu.DecodeError, msg:\n raise Exception('encoded_string_value decoding error - '\n 'Could not decode Charset value: %s' % msg)\n\n return wsp_pdu.Decoder.decode_text_string(byte_iter)\n except wsp_pdu.DecodeError:\n # Fall back on just \"Text-string\"\n return wsp_pdu.Decoder.decode_text_string(byte_iter)", "def decode(self, t, length, raw=False):\n if length.numel() == 1:\n length = length[0]\n assert t.numel() == length, \"text with length: {} does not match declared length: 
{}\".format(t.numel(),\n length)\n if raw:\n return ''.join([self.alphabet[i - 1] for i in t])\n else:\n char_list = []\n for i in range(length):\n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):\n char_list.append(self.alphabet[t[i] - 1])\n return ''.join(char_list)\n else:\n # batch mode\n assert t.numel() == length.sum(), \"texts with length: {} does not match declared length: {}\".format(\n t.numel(), length.sum())\n texts = []\n index = 0\n for i in range(length.numel()):\n l = length[i]\n texts.append(\n self.decode(\n t[index:index + l], torch.IntTensor([l]), raw=raw))\n index += l\n return texts", "def _decode_octet_string(bytes_data): # type: (bytes) -> bytes\n return bytes_data", "def string_to_int(string, length, vocab):\n\n u = vocab[\"<unk>\"]\n if len(string) > length:\n string = string[:length]\n\n rep = list(map(lambda x: vocab.get(x, u), string))\n\n if len(string) < length:\n rep += [vocab['<pad>']] * (length - len(string))\n\n #print (rep)\n return rep", "def _decode(data: BencodedString) -> Union[bytes, dict, int, list]:\n if not data.bytes:\n raise ValueError(\"Cannot decode an empty bencoded string.\")\n\n if data.bytes[0] == START_DICT:\n return _decode_dict(data)\n\n if data.bytes[0] == START_LIST:\n return _decode_list(data)\n\n if data.bytes[0] == START_INTEGER:\n return _decode_int(data)\n\n if chr(data.bytes[0]).isdigit():\n return _decode_bytes(data)\n\n raise ValueError(\n \"Cannot decode data, expected the first byte to be one of \"\n f\"'d', 'i', 'l' or a digit, got {chr(data.bytes[0])!r} instead.\"\n )", "def decode_text(text, k, n):\n\n code = AsciiCode(k, n)\n generator_m = code.get_generator_mat()\n stripped_text = text.replace(\" \", \"\")\n binary_text_form = transform_text_to_binary(stripped_text)\n disorted_text = distort_message(binary_text_form, generator_m, k, n)\n decoded_text = \"\"\n\n t1 = time.time()\n for letter in disorted_text:\n corrected_letter = code.decode_letter(letter)\n decoded_text = decoded_text + str(corrected_letter)\n\n t2 = time.time()\n total_time = t2-t1\n return decoded_text, total_time", "def decode(self, s):\n o = self._decoder.decode(s)\n return o", "def decode(self, t, length, raw=False):\n if length.numel() == 1:\n length = length.item()\n assert t.numel() == length, \"text with length: {} does not match declared length: {}\".format(t.numel(),\n length)\n if raw:\n return ''.join([self.alphabet[i - 1] for i in t])\n else:\n char_list = []\n for i in range(length):\n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):\n char_list.append(self.alphabet[t[i] - 1])\n return ''.join(char_list)\n else:\n # batch mode\n assert t.numel() == length.sum(), \"texts with length: {} does not match declared length: {}\".format(\n t.numel(), length.sum())\n texts = []\n index = 0\n for i in range(length.numel()):\n l = length[i]\n texts.append(\n self.decode(\n t[index:index + l], torch.IntTensor([l]), raw=raw))\n index += l\n return texts", "def base62_decode(string, alphabet=ALPHABET):\n base = len(alphabet)\n strlen = len(string)\n num = 0\n\n idx = 0\n for char in string:\n power = (strlen - (idx + 1))\n num += alphabet.index(char) * (base ** power)\n idx += 1\n\n return num", "def base62_decode(string, alphabet=ALPHABET):\n base = len(alphabet)\n strlen = len(string)\n num = 0\n\n idx = 0\n for char in string:\n power = (strlen - (idx + 1))\n num += alphabet.index(char) * (base ** power)\n idx += 1\n\n return num", "def base62_decode(string, alphabet=ALPHABET):\n base = len(alphabet)\n strlen = len(string)\n num = 0\n\n idx 
= 0\n for char in string:\n power = (strlen - (idx + 1))\n num += alphabet.index(char) * (base ** power)\n idx += 1\n\n return num", "def base62_decode(string, alphabet=ALPHABET):\n base = len(alphabet)\n strlen = len(string)\n num = 0\n\n idx = 0\n for char in string:\n power = (strlen - (idx + 1))\n num += alphabet.index(char) * (base ** power)\n idx += 1\n\n return num", "def decode_b64(s):\n if isinstance(s, dict):\n ret = {}\n for k,v in s.items():\n ret[k] = decode_b64(v)\n return ret\n elif isinstance(s, list) or isinstance(s, tuple):\n ret = []\n for v in s:\n ret.append(decode_b64(v))\n return ret\n elif isinstance(s, str) and s.beginswith('\\0'):\n return standard_b64decode(s[1:])\n else:\n return s", "def decodebytes(s):\n\n decoded = decode(s)\n buf = bytearray()\n while decoded > 0:\n buf.append(decoded & 0xff)\n decoded //= 256\n buf.reverse()\n\n return bytes(buf)", "def build_decoder(shift):\n ### TODO.\n decoder = build_coder(27-shift)\n # print decoder\n return decoder", "def compression(binary_sequence:str):\r\n compressed_sequence = \"\"\r\n calcul_byte =(len(binary_sequence) % 8)\r\n if calcul_byte != 0:\r\n binary_sequence = (8 - calcul_byte)*'0' + binary_sequence\r\n \"\"\" \r\n Add the missing 0's at the beginning of the string so that its length \r\n is divisible by 8 without remainder\r\n \"\"\"\r\n for byte in range(0, len(binary_sequence), 8):\r\n compressed_sequence += chr(int(binary_sequence[byte:byte+8], 2))\r\n return (compressed_sequence, calcul_byte)", "def decode(self, t, length, raw=False):\n if length.numel() == 1:\n length = length[0]\n assert t.numel() == length, \"text with length: {} does not match declared length: {}\".format(t.numel(), length)\n if raw:\n return ''.join([self.alphabet[i] for i in t])\n else:\n char_list = []\n for i in range(length):\n if t[i] != 0 :\n char_list.append(self.alphabet[t[i]])\n else:\n break\n return ''.join(char_list)\n else:\n # batch mode\n assert t.numel() == length.sum(), \"texts with length: {} does not match declared length: {}\".format(t.numel(), length.sum())\n texts = []\n index = 0\n for i in range(length.numel()):\n l = length[i]\n texts.append(\n self.decode(\n t[index:index + l], torch.IntTensor([l]), raw=raw))\n index += l\n return texts", "def decode_message(A):\n a = len(A)\n x = [[0 for letter in A] for char in A]\n for i in range(a):\n x[i][i] = A[i] #covers base case where i = j, palindrome of string length one is one no matter what\n #variable character length (char_len)\n for char_len in range(2,a+1): #choosing different character length\n for i in range(a-char_len+1):\n j = i+char_len-1\n if A[i] == A[j] and char_len == 2:\n x[i][j] = A[i]+A[j]\n elif A[i] == A[j]:\n x[i][j] = A[i]+x[i+1][j-1]#+A[j]\n else:\n x[i][j] = max(x[i][j-1],x[i+1][j],key=len)\n longest = x[0][a-1]\n #l = len(longest)\n return longest#[:l//2+1]", "def pkcs5_unpad(self,s):\n return s[0:-ord(s[-1])]", "def count_decodings(s):\n\n if len(s) == 1:\n return 1\n if len(s) == 2:\n return 2\n including_last_digit = 0\n including_last_two_digit = 0\n if int(s[-1]) > 0:\n including_last_digit = count_decodings(s[:-1])\n if int(s[-2:]) < 28:\n including_last_two_digit = count_decodings(s[:-2])\n return including_last_digit + including_last_two_digit", "def base64_decode(n, encoding='ISO-8859-1'):\t\n decoded = base64.decodestring(n.encode('ascii'))\t\n return tonative(decoded, encoding)" ]
[ "0.69079274", "0.67722535", "0.6671197", "0.6628837", "0.6628837", "0.6597548", "0.6395014", "0.6347437", "0.63195705", "0.62489367", "0.6163923", "0.6139675", "0.6125669", "0.609444", "0.6063985", "0.6007184", "0.59968084", "0.59952885", "0.59780675", "0.5940352", "0.5912996", "0.5893867", "0.58927095", "0.5868516", "0.58317095", "0.58214504", "0.5816458", "0.5814295", "0.58091027", "0.5806967", "0.57955945", "0.5783422", "0.5776791", "0.5775935", "0.57689625", "0.5763676", "0.5756712", "0.57480854", "0.5716777", "0.57146525", "0.5692214", "0.5690698", "0.56866044", "0.5685456", "0.56794685", "0.5642276", "0.56416184", "0.56392163", "0.5635354", "0.56341726", "0.5629618", "0.56294954", "0.56279904", "0.56162447", "0.56162447", "0.56110346", "0.5598692", "0.5598045", "0.5587824", "0.55816424", "0.5579812", "0.5575585", "0.5573985", "0.5568866", "0.5560927", "0.55582553", "0.5557687", "0.5538549", "0.5537869", "0.55210614", "0.5515094", "0.5507966", "0.5497777", "0.54960495", "0.5495226", "0.5492664", "0.5489133", "0.54823786", "0.5481528", "0.5472496", "0.5454922", "0.5448344", "0.54467684", "0.5446484", "0.5444113", "0.54279184", "0.5425916", "0.5425675", "0.5425675", "0.5425675", "0.5425675", "0.54154974", "0.5411644", "0.5404274", "0.5402721", "0.5402212", "0.53974724", "0.5394658", "0.53944093", "0.53930914" ]
0.63877225
7
type + sequence_number + key_size + key + value_size + value 1bit 63bit 32bit varlength 32bit varlength
def __init__(self, key, sequence_number, type=KeyType.PUT, value=None):
    assert key is not None
    assert sequence_number >= 0
    self.type = type
    self.sequence_number = sequence_number
    self.key = key
    self.value = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_meta_chunk(key, value):\n bkey = key.encode(\"utf-8\")\n bvalue = value.encode(\"utf-8\")\n return (wozardry.to_uint32(len(bkey) + len(bvalue) + 2) + bkey + b'\\x09' + bvalue + b'\\x0A').hex()", "def _pack_dict( self, table, pad = False ) :\r\n\r\n keys, values = zip( *table.items() )\r\n \r\n ## #debug \r\n ## print \"_pack_dict(): keys; values\", keys, values\r\n\r\n # we hope not to be called with an empty dict(), but ... \r\n if len( keys ) <= 0 :\r\n return struct.pack('0s', '') \r\n\r\n #\r\n # preprocess the keys ... \r\n #\r\n\r\n # 4-byte condition check\r\n if not pad : \r\n # \"try\" ... \r\n map( Eggog.check_type, keys ) \r\n map( Eggog.check_len, keys )\r\n\r\n else : # else convert to string and truncate or pad\r\n \r\n for i in xrange(len(keys)) :\r\n\r\n k = keys[i] \r\n if type(k) != type( '' ) :\r\n \r\n k = make_fit( str(k) )\r\n keys[i] = k\r\n \r\n \r\n \r\n # check uniqueness of the keys\r\n \r\n\r\n # \r\n # pack the values \r\n #\r\n\r\n nkeys = len(keys) \r\n \r\n if nkeys > 255 :\r\n raise Eggog( \"too many keys to send (%d > 255)\" % (nkeys, ) ) \r\n \r\n\r\n nkeys_str = struct.pack( '=B', nkeys ) \r\n\r\n values_packed = map( self._pack_data, values )\r\n\r\n items_packed = [nkeys_str, ] * ( 2 * nkeys + 1 )\r\n items_packed[1::2] = keys[:]\r\n items_packed[2::2] = values_packed\r\n\r\n result = _cat( *items_packed ) \r\n\r\n ## #debug \r\n ## print \"_pack_dict(): nkeys, keys; values_packed\", nkeys, keys, repr(values_packed)\r\n\r\n ## print \"_pack_dict(): result:\", repr(result ) \r\n\r\n return result", "def encode(key, value, ber_length=0):\n return bytearray(key) + encode_ber(len(value), ber_length) + bytearray(value)", "def key_type(self) -> global___Type:", "def decode(k, key_length):\n key = k[:key_length]\n val_length, ber_length = decode_ber(k[key_length:])\n value = k[key_length + ber_length : key_length + ber_length + val_length]\n return key, value", "def __init__(self, batch_size, vocab_size, memory_size,\n\t\t\t\t query_size, key_size, value_size, embedding_size,\n\t\t\t\t feature_size=30,\n\t\t\t\t hops=3,\n\t\t\t\t l2_lambda=0.2,\n\t\t\t\t name='KeyValueMemN2N'):\n\t\tself._key_size = key_size\n\t\tself._value_size = value_size\n\t\tself._batch_size = batch_size\n\t\tself._vocab_size = vocab_size\n\t\tself._memory_key_size = memory_size\n\t\tself._memory_value_size = memory_size\n\t\tself._query_size = query_size\n\t\t#self._wiki_sentence_size = doc_size\n\t\tself._embedding_size = embedding_size\n\t\tself._hops = hops\n\t\tself._l2_lambda = l2_lambda\n\t\tself._name = name\n\t\tself._key_encoding = tf.constant(position_encoding(self._key_size, self._embedding_size), name=\"encoding\")\n\t\tself._value_encoding = tf.constant(position_encoding(self._value_size, self._embedding_size), name=\"encoding\")\n\t\tself._query_encoding = tf.constant(position_encoding(self._query_size, self._embedding_size), name=\"encoding\")\n\t\tself._build_inputs()\n\n\t\t\n\t\td = feature_size\n\t\tself._feature_size = feature_size\n\t\tself._build_graph()", "def bit_length(self, ???):", "def hashId(key, size):\n return sum([ord(c) for c in key]) % size", "def keysequence(value):\r\n return value.toString()", "def key_size(self) -> int:\n pass", "def key_size(self) -> int:\n pass", "def genkey(value, length = 8):\n if not isinstance(value, str):\n raise ValueError('Expected `value` to be `str`.')\n\n return blake2b(value.encode('utf-8'), digest_size=4).hexdigest()", "def JAVA_NATIVE(key):\n h = 0\n l = len(key)\n for (idx,c) in enumerate(key):\n h += 
ord(c)*31**(l-(idx+1))\n return _signed_int32(h)", "def encode(x):\n i = int(16384 * x)\n return Struct(\"h\").pack(i)", "def estimate_map_output_bytes(num_words, key_num_bytes, value_num_bytes):\n return num_words * (key_num_bytes + value_num_bytes)", "def key_to_struct(key: RsaKey) -> bytes:\n mod = int_to_bytes(key.n)\n exponent = int_to_bytes(key.e)\n\n return b\"\\x00\\x00\\x00\\x80\" + mod + b\"\\x00\\x00\\x00\\x03\" + exponent", "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def unmarshal_compactsize(b):\n key = b[0]\n if key == 0xff:\n return b[0:9], unmarshal_uint(b[1:9])\n if key == 0xfe:\n return b[0:5], unmarshal_uint(b[1:5])\n if key == 0xfd:\n return b[0:3], unmarshal_uint(b[1:3])\n return b[0:1], unmarshal_uint(b[0:1])", "def _dict_new_sized(typingctx, n_keys, keyty, valty):\n resty = types.voidptr\n sig = resty(n_keys, keyty, valty)\n\n def codegen(context, builder, sig, args):\n n_keys = builder.bitcast(args[0], ll_ssize_t)\n\n # Determine sizeof key and value types\n ll_key = context.get_data_type(keyty.instance_type)\n ll_val = context.get_data_type(valty.instance_type)\n sz_key = context.get_abi_sizeof(ll_key)\n sz_val = context.get_abi_sizeof(ll_val)\n\n refdp = cgutils.alloca_once(builder, ll_dict_type, zfill=True)\n\n argtys = [ll_dict_type.as_pointer(), ll_ssize_t, ll_ssize_t, ll_ssize_t]\n fnty = ir.FunctionType(ll_status, argtys)\n fn = ir.Function(builder.module, fnty, 'numba_dict_new_sized')\n\n args = [refdp, n_keys, ll_ssize_t(sz_key), ll_ssize_t(sz_val)]\n status = builder.call(fn, args)\n\n allocated_failed_msg = \"Failed to allocate dictionary\"\n _raise_if_error(context, builder, status, msg=allocated_failed_msg)\n\n dp = builder.load(refdp)\n return dp\n\n return sig, codegen", "def encode(x):\n i = int(16384 * x)\n return Struct('h').pack(i)", "def calc_keyid(flags, protocol, algorithm, st):\n # Remove spaces and create the wire format\n st0=st.replace(' ', '')\n st2=struct.pack('!HBB', int(flags), int(protocol), int(algorithm))\n st2+=base64.b64decode(st0)\n \n # Calculate the tag\n cnt=0\n for idx in xrange(len(st2)):\n s=struct.unpack('B', st2[idx])[0]\n if (idx % 2) == 0:\n cnt+=s<<8\n else:\n cnt+=s\n \n ret=((cnt & 0xFFFF) + (cnt>>16)) & 0xFFFF\n \n return(ret)", "def xxh128(data):\n storage_key1 = bytearray(xxhash.xxh64(data, seed=0).digest())\n storage_key1.reverse()\n\n storage_key2 = bytearray(xxhash.xxh64(data, seed=1).digest())\n storage_key2.reverse()\n\n return storage_key1 + storage_key2", "def loads_value(type_key, binary_data, version, vectors):\n # pylint: disable=too-many-return-statements\n\n if isinstance(type_key, bytes):\n type_key = type_keys.Value(type_key)\n\n if type_key == type_keys.Value.INTEGER:\n return struct.unpack(\"!q\", binary_data)[0]\n if type_key == type_keys.Value.FLOAT:\n return struct.unpack(\"!d\", binary_data)[0]\n if type_key == type_keys.Value.COMPLEX:\n return complex(*struct.unpack(formats.COMPLEX_PACK, binary_data))\n if type_key == type_keys.Value.NUMPY_OBJ:\n return common.data_from_binary(binary_data, np.load)\n if type_key == type_keys.Value.STRING:\n return binary_data.decode(common.ENCODE)\n if type_key == type_keys.Value.NULL:\n return None\n if type_key == type_keys.Value.CASE_DEFAULT:\n return CASE_DEFAULT\n if type_key == type_keys.Value.PARAMETER_VECTOR:\n return common.data_from_binary(binary_data, _read_parameter_vec, vectors=vectors)\n if type_key == type_keys.Value.PARAMETER:\n return 
common.data_from_binary(binary_data, _read_parameter)\n if type_key == type_keys.Value.PARAMETER_EXPRESSION:\n if version < 3:\n return common.data_from_binary(binary_data, _read_parameter_expression)\n else:\n return common.data_from_binary(\n binary_data, _read_parameter_expression_v3, vectors=vectors\n )\n\n raise exceptions.QpyError(f\"Serialization for {type_key} is not implemented in value I/O.\")", "def hashing_info(string):#KEY HASHING FUNCTION\n nodeInfo = string.encode('utf-8')\n\n #md5 -> 2^7 = 128 bits\n hash_object = hashlib.md5()\n hash_object.update(nodeInfo)\n\n tmp = hash_object.hexdigest()\n tmp = int(tmp,16)\n\n result = tmp >> (128-16)\n return result", "def h_python(key, N):\n return hash(key) % N", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def test_long():\n key = 'A' * 242\n hashed_key = '%s[3705915182]' % ('A' * 229)\n full_key = 'prefix:1:%s' % hashed_key\n assert full_key == make_key(key, 'prefix', 1)", "def ohe_sequence(sequence, batch_size, seq_size, dict_size):\n bow_vec = np.zeros((batch_size, seq_size, dict_size), dtype=np.float32)\n\n for flow_num in range(batch_size):\n for action_num in range(seq_size):\n bow_vec[flow_num, action_num, sequence[flow_num][action_num]] = 1\n\n return bow_vec", "def key(nullable=True):\n return sa.Column(\n \"key\",\n sa.Text().with_variant(mysql.VARCHAR(255), \"mysql\"),\n nullable=nullable,\n )", "def RSA_KEYPAIR_SIZE() :\n return 512", "def testIntegerKeys(self):\n hd = HeapDict(size=1)\n hd.push(1, 2)\n self.assertEqual(hd.get_result(), {1: [2]})", "def keyGen(key):\n def leftShift(keyBitList):\n \"\"\"Perform a circular left shift on the first and second five bits\"\"\"\n shiftedKey = [None] * KeyLength\n shiftedKey[0:9] = keyBitList[1:10]\n shiftedKey[4] = keyBitList[0]\n shiftedKey[9] = keyBitList[5]\n return shiftedKey\n\n # Converts input key (integer) into a list of binary digits\n keyList = [(key & 1 << i) >> i for i in reversed(range(KeyLength))]\n permKeyList = [None] * KeyLength\n for index, elem in enumerate(P10table):\n permKeyList[index] = keyList[elem - 1]\n shiftedOnceKey = leftShift(permKeyList)\n shiftedTwiceKey = leftShift(leftShift(shiftedOnceKey))\n subKey1 = subKey2 = 0\n for index, elem in enumerate(P8table):\n subKey1 += (128 >> index) * shiftedOnceKey[elem - 1]\n subKey2 += (128 >> index) * shiftedTwiceKey[elem - 1]\n return (subKey1, subKey2)", "def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )", "def _key(self):\n return (self.name, self.struct_types, self.struct_values)", "def __extract_fields(self):\n for name, stuff in self.data.items():\n if stuff == (): # Empty tuple == 1 bit, value of 0\n self.fields.append(Field(name=name, value=0, size=1))\n elif isinstance(stuff, int): # int == specified value, value of 0\n self.fields.append(Field(name=name, value=0, size=stuff))\n elif isinstance(stuff, str): # str == specified value, value of 0\n pattern = re.compile(\"[0-9]+[bB]\")\n if pattern.match(stuff):\n if \"b\" in stuff: # bits specified\n size = int(stuff[:stuff.lower().index(\"b\")])\n self.fields.append(Field(name=name, value=0, size=size))\n elif \"B\" in stuff: # Bytes specified\n size = int(stuff[:stuff.lower().index(\"b\")]) * 8\n self.fields.append(Field(name=name, value=0, size=size))\n else: # No other string option, so must have been one of the \"vary\" constants from above.\n self.fields.append(Field(name=name, value=stuff, 
size=\"vary\"))\n elif isinstance(stuff, tuple) or isinstance(stuff, list): # specified value and size.\n if isinstance(stuff[0], str):\n if \"b\" in stuff[0]: # Bits\n size = int(stuff[0][:stuff[0].lower().index(\"b\")])\n # if not self.__check_bit_size(stuff[1], size):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(size) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=size))\n elif \"B\" in stuff[0]: # Bytes\n size = int(stuff[0][:stuff[0].lower().index(\"b\")]) * 8\n # if not self.__check_bit_size(stuff[1], size):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(size) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=size))\n elif stuff[0].lower() == NULL_TERMINATE:\n self.fields.append(Field(name=name, value=stuff[1], size=NULL_TERMINATE))\n elif stuff[0].lower() == PREFIX_LENGTH:\n self.fields.append(Field(name=name, value=stuff[1], size=PREFIX_LENGTH))\n elif stuff[0].lower() == PREFIX_LEN_NULL_TERM:\n self.fields.append(Field(name=name, value=stuff[1], size=PREFIX_LEN_NULL_TERM))\n elif stuff[0].lower() == IPv4:\n self.fields.append(Field(name=name, value=stuff[1], size=IPv4))\n elif isinstance(stuff[0], int):\n # if not self.__check_bit_size(stuff[1], stuff[0]):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(stuff[0]) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=stuff[0]))", "def __init__(self, name, id=0, xtd=0, rtr= 0 ,dtype='u8', endian='intel', startbit=0, bitlength=32, val=0.0 ):\r\n self._name = name\r\n self._id = id\r\n self._xtd = xtd\r\n self._rtr = rtr \r\n self._dtype = dtype\r\n self._endian = endian\r\n self._startbit = startbit\r\n self._bitlength = bitlength\r\n self._val = val", "def encode_vector_of_t(value: list):\n return encode_u32(len(value)) + bytes([i for j in value for i in j])", "def create_key ():", "def encode(self,\n data: mx.sym.Symbol,\n data_length: Optional[mx.sym.Symbol],\n seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:\n pass", "def StoreBits32(self, val):\n tmp_val = struct.pack(\">L\", val)\n self.StoreBits( (StrToList(tmp_val), 32))", "def generate_random_binary(length):\n key = [str(random.randint(0,1)) for x in range(length)]\n return \"\".join(key)", "def memory_key_values(k, v, num_mem_kv, dim_batch, dim_heads, variable_dtype, mesh):\n\n dim_mem_kv = mtf.Dimension(\"mem_kv_sequence\", num_mem_kv)\n emb_dim = k.shape[-1]\n mem_std = 1 / math.sqrt(emb_dim.size)\n\n mem_k = mtf.get_variable(mesh, \"mem_k\", mtf.Shape([dim_mem_kv, dim_heads, emb_dim]),\n initializer=tf.random_normal_initializer(stddev=mem_std),\n master_dtype=variable_dtype.master_dtype,\n slice_dtype=variable_dtype.slice_dtype,\n activation_dtype=variable_dtype.activation_dtype,\n )\n mem_v = mtf.get_variable(mesh, \"mem_v\", mtf.Shape([dim_mem_kv, dim_heads, emb_dim]),\n initializer=tf.random_normal_initializer(stddev=mem_std),\n master_dtype=variable_dtype.master_dtype,\n slice_dtype=variable_dtype.slice_dtype,\n activation_dtype=variable_dtype.activation_dtype)\n\n mem_k, mem_v = map(lambda t: mtf.broadcast(t, [dim_batch, dim_mem_kv, dim_heads, emb_dim]),\n (mem_k, mem_v))\n mem_k, mem_v = map(lambda t: mtf.rename_dimension(t, \"mem_kv_sequence\", \"sequence\"),\n (mem_k, mem_v))\n\n k = mtf.concat([mem_k, k], \"sequence\")\n v = mtf.concat([mem_v, v], \"sequence\")\n return k, v", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 
0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! sequence is too short\")", "def _to_packed(self, value):\n raise NotImplementedError", "def create_shared_key(self, scalar: bytes, point: bytes) -> bytes:", "def encode(self,\n data: mx.sym.Symbol,\n data_length: Optional[mx.sym.Symbol],\n seq_len: int = 0) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:\n return data, data_length, seq_len", "def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()", "def test_bit_insert_value_byte_size_too_large(self):\n value = bytearray([3] * 6)\n ops = [bitwise_operations.bit_insert(self.five_255_bin, 0, 6, value, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([3] * 6 + [255] * 5)\n assert bins[self.five_255_bin] == expected_result", "def pack(dicty: dict[str, Any]) -> bytes:\n column_count = 0\n column_directory = []\n directory_offset = 0\n name_offset = 0\n names = []\n data_offset = 0\n data = []\n total_encname_length = 0\n\n dicty_names_encoded = {key.encode(\"utf-8\"): value for key, value in dicty.items()}\n\n for encname in sorted(dicty_names_encoded.keys(), key=name_order):\n value = dicty_names_encoded[encname]\n if value is None:\n continue\n\n if len(encname) > MAX_NAME_LENGTH:\n raise DynColLimitError(\"Key too long: \" + encname.decode(\"utf-8\"))\n total_encname_length += len(encname)\n if total_encname_length > MAX_TOTAL_NAME_LENGTH:\n raise DynColLimitError(\"Total length of keys too long\")\n\n try:\n encode_func = ENCODE_FUNCS[type(value)]\n except KeyError:\n raise DynColTypeError(f\"Unencodable type {type(value)}\")\n dtype, encvalue = encode_func(value)\n\n column_count += 1\n column_directory.append(name_offset)\n column_directory.append((data_offset << 4) + dtype)\n names.append(encname)\n name_offset += len(encname)\n data.append(encvalue)\n data_offset += len(encvalue)\n\n directory_offset += 2\n\n data_size_flag, coldir_size_code, odd_sized_datacode = data_size(data)\n\n flags = 4 | data_size_flag # means this contains named dynamic columns\n enc_names = b\"\".join(names)\n\n buf = [struct_pack(\"<BHH\", flags, column_count, len(enc_names))]\n if not odd_sized_datacode:\n buf.append(\n struct_pack(\n \"<\" + (\"H\" + coldir_size_code) * (len(column_directory) // 2),\n 
*column_directory,\n )\n )\n else:\n for i, val in enumerate(column_directory):\n if i % 2 == 0:\n # name_offset\n buf.append(struct_pack(\"<H\", val))\n else:\n # data_offset + dtype, have to cut last byte\n value = struct_pack(\"<\" + coldir_size_code, val)\n buf.append(value[:-1])\n buf.append(enc_names)\n buf.extend(data)\n return b\"\".join(buf)", "def reduce_length(key, values):\n yield str((key, len(values)))", "def key_type(self):\n raise exceptions.NotImplementedError()", "def sizes(cls, p): \n try:\n return cls.table_dict[p]\n except:\n assert p > 1 and p & (p + 1) == 0, str(p)\n d = cls.tables.copy()\n d[\"P\"] = p\n d[\"P_BITS\"] = P_BITS = bitlen(p)\n FIELD_BITS = P_BITS\n while (FIELD_BITS & (FIELD_BITS - 1)): \n FIELD_BITS += 1 \n d[\"FIELD_BITS\"] = FIELD_BITS\n d[\"LOG_FIELD_BITS\"] = hibit(FIELD_BITS)\n d[\"INT_FIELDS\"] = INT_FIELDS = cls.INT_BITS // FIELD_BITS\n d[\"LOG_INT_FIELDS\"] = hibit(INT_FIELDS)\n V24_INTS = 32 // INT_FIELDS\n d[\"V24_INTS\"] = V24_INTS\n d[\"LOG_V24_INTS\"] = hibit(V24_INTS)\n d[\"V24_INTS_USED\"] = V24_INTS - (V24_INTS >> 2)\n V64_INTS = 64 // INT_FIELDS\n d[\"V64_INTS\"] = V64_INTS\n d[\"LOG_V64_INTS\"] = hibit(V64_INTS)\n d[\"MMV_INTS\"] = 3 * (2048 + 24) * V24_INTS + 759 * V64_INTS\n partial_smask = partial(smask_default, FIELD_BITS)\n d[\"smask\"] = UserFormat(partial_smask, fmt = c_hex)\n cls.table_dict[p] = d\n return d", "def testOneSize(self):\n hd = HeapDict(size=1)\n hd.push('a', 2)\n hd.push('a', 1)\n hd.push('b', 3)\n hd.push('b', 4)\n self.assertEqual(hd.get_result(), {'a': [2], 'b': [4]})", "def create_data_set():\n data_set = {}\n for index in range(1024):\n size = random.randint(1, 100) #nosec\n key = str(index).encode(\"utf-8\")\n data_set[key] = get_random_bytes(size)\n return data_set", "def key_id(key, origin=None):\n\n rdata = _to_rdata(key, origin)\n if key.algorithm == RSAMD5:\n return (rdata[-3] << 8) + rdata[-2]\n else:\n total = 0\n for i in range(len(rdata) // 2):\n total += (rdata[2 * i] << 8) + rdata[2 * i + 1]\n if len(rdata) % 2 != 0:\n total += rdata[len(rdata) - 1] << 8\n total += (total >> 16) & 0xFFFF\n return total & 0xFFFF", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def analyze_bit(key: List[Key],\n side_length: int,\n random_bits: str,\n message_bits: str):\n # Concatenate the input to get cube input.\n cube_input = message_bits + random_bits\n # Initialize the cube.\n cube = Cube(cube_input=cube_input, cube_side_length=side_length)\n\n # Xor, Shift, and apply move onto the cube.\n for each_key in key:\n cube.xor()\n cube.shift_cubie_content()\n cube.shift(key=each_key)\n\n # Count number of zeros and number of ones.\n return {\n \"0\": cube.content.count(\"0\"),\n \"1\": cube.content.count(\"1\")\n }", "def test_pos_operate_with_bin_length_extra_nostricttypes(self):\n key = (\"test\", \"demo\", 1)\n\n max_length = \"a\"\n for _ in range(20):\n max_length = max_length + \"a\"\n\n llist = [\n {\"op\": aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"},\n {\"op\": aerospike.OPERATOR_INCR, \"bin\": max_length, \"val\": 3},\n ]\n\n TestOperate.client_no_typechecks.operate(key, llist)\n\n (key, _, bins) = TestOperate.client_no_typechecks.get(key)\n\n assert bins == {\"name\": \"ramname1\", \"age\": 1}", "def test_matrix_kv(matrix):\n assert isinstance(matrix.kv, unitdata.Storage)", "def __getKeyInformation( self , flaglist ):\n\t\tkeyinfo = 0\n\t\tif 'HMAC_MD5_RC4' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 0 )\n\t\tif 'HMAC_SHA1_AES' in 
flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 1 )\n\t\tif 'group' in flaglist:\n\t\t\tpass\n\t\tif 'pairwise' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 3 )\n\t\tif 'idx0' in flaglist:\n\t\t\tpass\n\t\tif 'idx1' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 4 )\n\t\tif 'idx2' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 5 )\n\t\tif 'install' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 6 )\n\t\tif 'ack' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 7 )\n\t\tif 'mic' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 8 )\n\t\tif 'secure' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 9 )\n\t\tif 'error' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 10 )\n\t\tif 'request' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 11 )\n\t\tif 'encrypted' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 12 )\n\t\treturn keyinfo", "def _dict_length(typingctx, d):\n resty = types.intp\n sig = resty(d)\n\n def codegen(context, builder, sig, args):\n fnty = ir.FunctionType(\n ll_ssize_t,\n [ll_dict_type],\n )\n fn = cgutils.get_or_insert_function(builder.module, fnty,\n 'numba_dict_length')\n [d] = args\n [td] = sig.args\n dp = _container_get_data(context, builder, td, d)\n n = builder.call(fn, [dp])\n return n\n\n return sig, codegen", "def decodeKeyRecord(keyFile,needToSwap,nRecordTypes):\n\n record = array.array('I')\n record.fromfile(keyFile,6)\n if needToSwap: record.byteswap()\n syncValue = (record[0],record[1],record[2])\n recordIndex = (record[4]<<32) | record[5]\n return (syncValue,recordIndex)", "def decode_key(as_bytes: typing.List[int]) -> str:\n raise NotImplementedError()", "def generate_key(self, size):\n key = bytearray()\n for i in range(0,size):\n random_byte = ord(os.urandom(1))\n key.append(random_byte)\n return key", "def generate_keypair(self, key_length: int = 2048) -> Tuple[bytes, bytes]:\n\n return None", "def __init__(self, *args):\n\n BaseDataTypes.__init__(self, *args)\n self.const_integer = 2\n self.const_integer_octet_encoded = '\\x02'\n self.const_integer_short_encoded = '\\x00\\x02'\n self.const_integer_long_encoded = '\\x00\\x00\\x00\\x02'\n self.const_integer_long_long_encoded = '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02'", "def __bytes__(self):\n prm = self.package(self.p, LEN_PRIME)\n gen = self.package(self.g, LEN_GEN)\n pbk = self.package(self.pk, LEN_PK)\n return prm + gen + pbk", "def __init__(self, *args):\n\n BaseDataTypes.__init__(self, *args)\n self.const_field_table_dummy_dict = {'$key1':'value1','$key2':'value2'}\n self.const_field_table_dummy_dict_encoded = '\\x00\\x00\\x00\\x22\\x05$key2S\\x00\\x00\\x00\\x06value2\\x05$key1S\\x00\\x00\\x00\\x06value1'", "def _from_packed(self, value):\n raise NotImplementedError", "def htable_put(table, key, value):", "def get_num_slots(self):\n Return the load factor for this hash table.\n\n Implement this.\n \"\"\"\n return self.elements / self.capacity\n\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 Hash, 64-bit\n\n Implement this, and/or DJB2.pyy\n \"\"\"\n\n # Your code here\n\n\n def djb2(self, key):\n \"\"\"\n DJB2 hash, 32-bit\n\n Implement this, and/or FNV-1.\n \"\"\"\n # Your code here\n\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n #return self.fnv1(key) % self.capacity\n<<<<<<< Updated upstream\n return self.djb2(key) % self.capacity\n=======\n return self.djb2(key) % len(self.storage)\n>>>>>>> Stashed changes\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n 
Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # need to account for if the key value is the same \n\n i = self.hash_index(key)\n if not self.storage[i]:\n hte = HashTableEntry(key, value)\n self.storage[i] = hte\n self.elements += 1\n hte.head = HashTableEntry(key, value)\n elif self.storage[i] and self.storage[i].key != key:\n self.storage[i].insert_at_head(HashTableEntry(key, value))\n>>>>>>> Stashed changes\n\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n i = self.hash_index(key)\n node = self.storage[i]\n prev = None\n if node.key == key:\n self.storage[i] = node.next\n return\n while node != None:\n if node.key == key:\n prev.next = node.next\n self.storage[i].next = None\n return\n prev = node\n node = node.next\n self.elements -= 1\n return\n>>>>>>> Stashed changes\n\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # - find the index in the hash table for the key\n i = self.hash_index(key)\n # - search the list for that key\n if not self.storage[i]:\n return None\n else:\n if self.storage[i].find_key(key) == key:\n return self.storage[i].value\n>>>>>>> Stashed changes\n\n\n def resize(self, new_capacity):\n \"\"\"\n Changes the capacity of the hash table and\n rehashes all key/value pairs.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n prev_storage = self.storage\n self.capacity = new_cap\n self.storage = [None] * new_cap\n for i in range(len(prev_storage)):\n prev = prev_storage[i]\n if prev:\n while prev:\n if prev.key:\n self.put(prev.key, prev.value)\n prev = prev.next\n\n>>>>>>> Stashed changes\n\n\n\nif __name__ == \"__main__\":\n ht = HashTable(8)\n\n ht.put(\"line_1\", \"'Twas brillig, and the slithy toves\")\n ht.put(\"line_2\", \"Did gyre and gimble in the wabe:\")\n ht.put(\"line_3\", \"All mimsy were the borogoves,\")\n ht.put(\"line_4\", \"And the mome raths outgrabe.\")\n ht.put(\"line_5\", '\"Beware the Jabberwock, my son!')\n ht.put(\"line_6\", \"The jaws that bite, the claws that catch!\")\n ht.put(\"line_7\", \"Beware the Jubjub bird, and shun\")\n ht.put(\"line_8\", 'The frumious Bandersnatch!\"')\n ht.put(\"line_9\", \"He took his vorpal sword in hand;\")\n ht.put(\"line_10\", \"Long time the manxome foe he sought--\")\n ht.put(\"line_11\", \"So rested he by the Tumtum tree\")\n ht.put(\"line_12\", \"And stood awhile in thought.\")\n\n print(\"\")\n\n # Test storing beyond capacity\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n # Test resizing\n old_capacity = ht.get_num_slots()\n ht.resize(ht.capacity * 2)\n new_capacity = ht.get_num_slots()\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # Test if data intact after resizing\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n print(\"\")", "def __generate_key(length):\n if length % 2 != 0:\n raise ValueError(\"'length' must be a multiple of 2\")\n length_bytes = int(length / 2) # length of key in bytes\n key_bytes = os.urandom(length_bytes)\n return binascii.hexlify(key_bytes).decode()", "def appenddictitemsize(self, key, numents):\n self._dentsvertsdata[key].appendsize(numents * self._multFactor)", "def 
__init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def get_dict_of_bytes2(self):\n pass", "def encode_metadata_size(metasiz):\n return SizeEncoder.pack(metasiz)", "def _set_key(self, key):\n\n # select 56 bits from the 64-bit key\n key = self._permutate(self.__pc1, self._string_to_bitlist(key))\n self.L = key[:28]\n self.R = key[28:]\n for i in range(0, 16):\n for j in range(0, self.__left_rotations[i]):\n self.L.append(self.L[0])\n del self.L[0]\n self.R.append(self.R[0])\n del self.R[0]\n # select 48 bits from 56 bits\n self.Kn[i] = self._permutate(self.__pc2, self.L + self.R)", "def store(self,key,start,end,data):\n\n pass", "def saveInTuple(list,name,type=\"R\",tname = \"T\", keytypes = {}):\r\n if keytypes == {}:\r\n\t for key in list[0].keys(): keytypes[key] = \"F\"\r\n \r\n print \"saving data\"\r\n labels=[]\r\n vvv=vector(0,0,0)\r\n if len(list)==0:\r\n print \"list size = 0\"\r\n return 0\r\n for i in range(4):\r\n if 'has_key' in dir(list[0]):break\r\n list=desagrupate(list)\r\n for key in list[0].keys():\r\n if (dir(list[0][key])==dir(vvv))or(dir(list[0][key])==dir(labels)):\r\n \r\n s=len(list[0][key])\r\n for j in range(s): \r\n labels.append(key+str(j+1)+\"/\" + keytypes[key])\r\n else: labels.append(key+\"/\"+keytypes[key])\r\n if type in [\"X\", \"ASCII\"]:\r\n TUP=XTuple(name,labels)\r\n if type==\"R\":\r\n TUP=RTuple(name,labels,tname)\r\n if type==\"M\":\r\n TUP=MTuple(name,labels)\r\n for thing in list:\r\n for key in thing.keys():\r\n #if key+\"1/\" + keytypes[key] in labels:\r\n # s=len(thing[key])\r\n # for j in range(s):\r\n # TUP.fillItem(key+str(j+1),thing[key][j])\r\n TUP.fillItem(key,thing[key])\r\n TUP.fill()\r\n TUP.close()", "def pack(buffer, *values):\n write_bytes = buffer.write\n\n def write_header(size, tiny, small=None, medium=None, large=None):\n if 0x0 <= size <= 0xF and tiny is not None:\n write_bytes(bytearray([tiny + size]))\n elif size < 0x100 and small is not None:\n write_bytes(bytearray([small]))\n write_bytes(PACKED_UINT_8[size])\n elif size < 0x10000 and medium is not None:\n write_bytes(bytearray([medium]))\n write_bytes(PACKED_UINT_16[size])\n elif size < 0x100000000 and large is not None:\n write_bytes(bytearray([large]))\n write_bytes(struct_pack(\">I\", size))\n else:\n raise ValueError(\"Collection too large\")\n\n for value in values:\n\n # None\n if value is None:\n write_bytes(b\"\\xC0\") # NULL\n\n # Boolean\n elif value is True:\n write_bytes(b\"\\xC3\")\n elif value is False:\n write_bytes(b\"\\xC2\")\n\n # Float (only double precision is supported)\n elif isinstance(value, float):\n write_bytes(b\"\\xC1\")\n write_bytes(struct_pack(\">d\", value))\n\n # Integer\n elif isinstance(value, integer_types):\n if -0x10 <= value < 0x80:\n write_bytes(PACKED_UINT_8[value % 0x100])\n elif -0x80 <= 
value < -0x10:\n write_bytes(b\"\\xC8\")\n write_bytes(PACKED_UINT_8[value % 0x100])\n elif -0x8000 <= value < 0x8000:\n write_bytes(b\"\\xC9\")\n write_bytes(PACKED_UINT_16[value % 0x10000])\n elif -0x80000000 <= value < 0x80000000:\n write_bytes(b\"\\xCA\")\n write_bytes(struct_pack(\">i\", value))\n elif INT64_MIN <= value < INT64_MAX:\n write_bytes(b\"\\xCB\")\n write_bytes(struct_pack(\">q\", value))\n else:\n raise ValueError(\"Integer %s out of range\" % value)\n\n # String\n elif isinstance(value, string_types):\n encoded = bstr(value)\n write_header(len(encoded), 0x80, 0xD0, 0xD1, 0xD2)\n write_bytes(encoded)\n\n # Byte array\n elif isinstance(value, bytes_types):\n write_header(len(value), None, 0xCC, 0xCD, 0xCE)\n write_bytes(bytes(value))\n\n # List\n elif isinstance(value, list):\n write_header(len(value), 0x90, 0xD4, 0xD5, 0xD6)\n pack(buffer, *value)\n\n # Map\n elif isinstance(value, dict):\n write_header(len(value), 0xA0, 0xD8, 0xD9, 0xDA)\n for key, item in value.items():\n pack(buffer, key, item)\n\n # Structure\n elif isinstance(value, Structure):\n write_header(len(value), 0xB0, None, None, None)\n write_bytes(bytearray([value.tag]))\n pack(buffer, *value.fields)\n\n # Other\n else:\n raise TypeError(\"Values of type %s are not supported\" % type(value))", "def _fill_cdata(cls):\n\n funcs = {}\n for key, name in [(\"b\", \"char\"), (\"h\", \"short\"),\n (\"i\", \"int\"), (\"q\", \"longlong\")]:\n for echar, esuffix in [(\"<\", \"le\"), (\">\", \"be\")]:\n esuffix = \"_\" + esuffix\n for unsigned in [True, False]:\n s = struct.Struct(echar + (key.upper() if unsigned else key))\n get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0]\n unpack = get_wrapper(s.unpack)\n unpack_from = get_wrapper(s.unpack_from)\n\n def get_unpack_from(s):\n def unpack_from(data, offset=0):\n return s.unpack_from(data, offset)[0], offset + s.size\n return unpack_from\n\n unpack_from = get_unpack_from(s)\n pack = s.pack\n\n prefix = \"u\" if unsigned else \"\"\n if s.size == 1:\n esuffix = \"\"\n bits = str(s.size * 8)\n\n if unsigned:\n max_ = 2 ** (s.size * 8) - 1\n min_ = 0\n else:\n max_ = 2 ** (s.size * 8 - 1) - 1\n min_ = - 2 ** (s.size * 8 - 1)\n\n funcs[\"%s%s_min\" % (prefix, name)] = min_\n funcs[\"%s%s_max\" % (prefix, name)] = max_\n funcs[\"%sint%s_min\" % (prefix, bits)] = min_\n funcs[\"%sint%s_max\" % (prefix, bits)] = max_\n\n funcs[\"%s%s%s\" % (prefix, name, esuffix)] = unpack\n funcs[\"%sint%s%s\" % (prefix, bits, esuffix)] = unpack\n funcs[\"%s%s%s_from\" % (prefix, name, esuffix)] = unpack_from\n funcs[\"%sint%s%s_from\" % (prefix, bits, esuffix)] = unpack_from\n funcs[\"to_%s%s%s\" % (prefix, name, esuffix)] = pack\n funcs[\"to_%sint%s%s\" % (prefix, bits, esuffix)] = pack\n\n for key, func in iteritems(funcs):\n setattr(cls, key, staticmethod(func))", "def keyIndex(self, key):\n key ^= bsr(key, 33)\n key *= 0xff51afd7ed558ccdL\n key ^= bsr(key, 33)\n key *= 0xc4ceb9fe1a85ec53L\n key ^= bsr(key, 33)\n return key", "def __construct_attibute_values(tag: IppTag, value: Any) -> bytes:\n bs = b\"\"\n\n if tag in (IppTag.INTEGER, IppTag.ENUM):\n bs += struct.pack(\">h\", 4)\n bs += struct.pack(\">i\", value)\n elif tag == IppTag.BOOLEAN:\n bs += struct.pack(\">h\", 1)\n bs += struct.pack(\">?\", value)\n else:\n bs += struct.pack(\">h\", len(value))\n bs += value.encode(\"utf-8\")\n\n return bs", "def h2(self, key) -> int:\n idx: int = (self.b * self.encode(key)) % self.table_size\n return idx if idx != 0 else 1", "def estimate_map_output_materialized_bytes(num_words, 
num_reducers, key_num_bytes, value_num_bytes):\n SPILL_FILE_PARTITION_INDICATOR_NUM_BYTES = 6\n\n return (num_words * (zero_compress.size_of_zero_compressed_int64(key_num_bytes) +\n key_num_bytes +\n zero_compress.size_of_zero_compressed_int64(value_num_bytes) +\n value_num_bytes) +\n (SPILL_FILE_PARTITION_INDICATOR_NUM_BYTES * num_reducers))", "def encode_u32(value: int) -> bytes:\n return int_to_le_bytes(value, NUMERIC_CONSTRAINTS[CLTypeKey.U32].LENGTH, False)", "def chunk_type(self) -> global___Type.BytesType:", "def keyExp(key):\r\n def sub2Nib(b):\r\n \"\"\"Swap each nibble and substitute it using sBox\"\"\"\r\n return sBox[b >> 4] + (sBox[b & 0x0f] << 4)\r\n \r\n Rcon1, Rcon2 = 0b10000000, 0b00110000\r\n w[0] = (key & 0xff00) >> 8\r\n w[1] = key & 0x00ff\r\n w[2] = w[0] ^ Rcon1 ^ sub2Nib(w[1])\r\n w[3] = w[2] ^ w[1]\r\n w[4] = w[2] ^ Rcon2 ^ sub2Nib(w[3])\r\n w[5] = w[4] ^ w[3]", "def encode_length(value):\n if value == Length.INDEFINITE:\n return bytes([0b10000000])\n\n if value < 127:\n return bytes([value])\n\n output = []\n while value > 0:\n value, remainder = value // 256, value % 256\n output.insert(0, remainder)\n\n # prefix length information\n output = [0b10000000 | len(output)] + output\n return bytes(output)", "def _key(self):\n return (self.name, self.array_type.upper(), self.values)", "def unpack( self, key, data ) :\r\n\r\n return struct.unpack(self[key], data)", "def pack( self, key, *args ) :\r\n\r\n ## return struct.pack(self[key], arg)\r\n # packing and unpacking here are assymetrical,\r\n # as for packing we want to send a complete string :\r\n ## fmt = '=c' + self[key].lstrip('@=<>!') \r\n\r\n fmt = '=c' + self[key].lstrip('=') \r\n\r\n # or more strict ( though not tested ) : \r\n '''\r\n fmt_ = self[key].lstrip('=') \r\n prefix = '=' \r\n PREFIXES = '@=<>!' 
\r\n if fmt_[0] in PREFIXES : \r\n\r\n prefix = fmt_[0] \r\n\r\n fmt = prefix + 'c' + self[key].lstrip( PREFIXES ) \r\n ''' \r\n\r\n\r\n ## # debug \r\n ## print \"format string: '%s', args: \" % (fmt, ), args \r\n\r\n result = struct.pack(fmt, key, *args) \r\n\r\n ## # debug \r\n ## print result\r\n\r\n return result", "def gen_parameters(generator=2,key_size=2048,backend=backend):\n\treturn dh.generate_parameters(generator,key_size,backend)", "def gen_keys():", "def test_encode_pair():\n\tassert encode_pair(0, 0) == 0\n\tassert encode_pair(1, 0) == 1\n\tassert encode_pair(0, 1) == 2\n\tassert encode_pair(4, 6) == 207", "def make_map(\n name: str, key_type: int, keys: List[Any], values: SequenceProto\n) -> MapProto:\n map_proto = MapProto()\n valid_key_int_types = [\n TensorProto.INT8,\n TensorProto.INT16,\n TensorProto.INT32,\n TensorProto.INT64,\n TensorProto.UINT8,\n TensorProto.UINT16,\n TensorProto.UINT32,\n TensorProto.UINT64,\n ]\n map_proto.name = name\n map_proto.key_type = key_type\n if key_type == TensorProto.STRING:\n map_proto.string_keys.extend(keys)\n elif key_type in valid_key_int_types:\n map_proto.keys.extend(keys)\n map_proto.values.CopyFrom(values)\n return map_proto", "def two_x64_concat(data):\n storage_key = bytearray(xxhash.xxh64(data, seed=0).digest())\n storage_key.reverse()\n\n return storage_key + data", "def hash_function(s):\n\n # O(n) over the key length\n # O(1) over the HASH_DATA_SIZE\n\n bytes_list = s.encode()\n\n total = 0\n\n\n for b in bytes_list: # O(n) over the length of the key\n total += b\n\n\n total &= 0xffffffff # 32 bit (8 f's)\n\n return total", "def __hash__(self):\n hash_value = 0\n \n # required\n hash_value ^= self.required << 14\n \n # title\n hash_value ^= hash(self.title)\n \n # type\n hash_value ^= hash(self.type)\n \n # values\n values = self.values\n if (values is not None):\n hash_value ^= len(values)\n \n for value in values:\n hash_value ^= hash(value)\n \n return hash_value" ]
[ "0.5809457", "0.5597021", "0.55033654", "0.5478504", "0.5475797", "0.5431724", "0.54011375", "0.53711766", "0.535695", "0.53174025", "0.53174025", "0.5285025", "0.5257738", "0.52197987", "0.5217525", "0.52163196", "0.51897675", "0.51784354", "0.5164822", "0.5161085", "0.5133049", "0.5132771", "0.51233876", "0.5116999", "0.50814086", "0.5077301", "0.5075172", "0.50541294", "0.5045825", "0.50183356", "0.49607635", "0.49424416", "0.49257272", "0.49088335", "0.49065703", "0.49046287", "0.48981637", "0.48850304", "0.48770157", "0.48689777", "0.48673463", "0.48570126", "0.48528162", "0.48317724", "0.4830153", "0.48263958", "0.4824446", "0.48117653", "0.48034185", "0.48013982", "0.48013362", "0.47918406", "0.47891", "0.47864434", "0.4783491", "0.47709933", "0.47695124", "0.47545555", "0.47444972", "0.47403103", "0.47399265", "0.47394043", "0.47245452", "0.4720622", "0.4704623", "0.4704324", "0.4702176", "0.47003922", "0.46861464", "0.46854302", "0.46829802", "0.46791396", "0.4677447", "0.4666978", "0.46659872", "0.46658787", "0.46639386", "0.46624416", "0.46554172", "0.46544945", "0.4652583", "0.46434093", "0.46378738", "0.4634843", "0.46315935", "0.46279368", "0.4625642", "0.46241367", "0.46204442", "0.46193543", "0.46177632", "0.46168867", "0.46157554", "0.46121126", "0.46032542", "0.45942116", "0.45941442", "0.45936853", "0.45936197", "0.45912138" ]
0.48401326
43
serialize internal keyvalue pair to byte_array, only pickle objects when necessary
def serialize(self):
    byte_array = bytearray()
    header = (
        self.sequence_number | (1 << 63)
        if self.type == KeyType.PUT
        else self.sequence_number
    )
    # append header first
    byte_array.extend(byte_utils.integer_to_n_bytes_array(header, 8))
    pickle_key = pickle.dumps(self.key)
    # key length
    byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_key)))
    # key byte array
    byte_array.extend(pickle_key)
    # it is a put operation, value is needed
    if self.type == KeyType.PUT:
        pickle_value = pickle.dumps(self.value)
        # value length
        byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_value)))
        # value byte array
        byte_array.extend(pickle_value)
    return bytes(byte_array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self, value) -> bytes:\n pass", "def _encode_value(self, value):\n return pickle.dumps(value)", "def serialize(obj):\n return pickle.dumps(obj)", "def dump_object(self, value):\n return pickle.dumps(value)", "def __bytes__(self):\n byteout = bytearray()\n for index in range(1, 15):\n key = \"d\" + str(index)\n if self._user_data.get(key) is not None:\n byteout.append(self._user_data[key])\n else:\n byteout.append(0x00)\n return bytes(byteout)", "def to_bytes(self, ???):", "def serialize(self, value: VALUE) -> bytes:\n raise NotImplementedError", "def serialize(obj):\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)", "def serialize(obj):\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)", "def serialize(self) -> bytes:\n return pickle.dumps(self)", "def serialize(self) -> bytes:\n pass", "def serialize(self) -> bytes:\n pass", "def dumps_value(obj):\n type_key = type_keys.Value.assign(obj)\n\n if type_key == type_keys.Value.INTEGER:\n binary_data = struct.pack(\"!q\", obj)\n elif type_key == type_keys.Value.FLOAT:\n binary_data = struct.pack(\"!d\", obj)\n elif type_key == type_keys.Value.COMPLEX:\n binary_data = struct.pack(formats.COMPLEX_PACK, obj.real, obj.imag)\n elif type_key == type_keys.Value.NUMPY_OBJ:\n binary_data = common.data_to_binary(obj, np.save)\n elif type_key == type_keys.Value.STRING:\n binary_data = obj.encode(common.ENCODE)\n elif type_key in (type_keys.Value.NULL, type_keys.Value.CASE_DEFAULT):\n binary_data = b\"\"\n elif type_key == type_keys.Value.PARAMETER_VECTOR:\n binary_data = common.data_to_binary(obj, _write_parameter_vec)\n elif type_key == type_keys.Value.PARAMETER:\n binary_data = common.data_to_binary(obj, _write_parameter)\n elif type_key == type_keys.Value.PARAMETER_EXPRESSION:\n binary_data = common.data_to_binary(obj, _write_parameter_expression)\n else:\n raise exceptions.QpyError(f\"Serialization for {type_key} is not implemented in value I/O.\")\n\n return type_key, binary_data", "def from_value(value):\n return pickle.dumps(value)", "def __bytes__(self):\n with BytesIO() as b:\n self.save(b)\n return b.getvalue()", "def to_bytes(self) -> bytes:", "def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)", "def serialize(self, data):", "def get_dict_of_bytes2(self):\n pass", "def _serialize_array(self, array):\n buffer = io.BytesIO()\n np.save(buffer, array)\n return buffer.getvalue()", "def __array__(self):\n return dict2rec(self)", "def dic_pickle_dumps_and_b64(data):\n for i in data:\n data[i] = base64.b64encode(pickle.dumps(data[i]))\n return data", "def encode(self):\r\n # Create dict from attributes. 
Maintain added order\r\n #jd = {'txpk': collections.OrderedDict()}\r\n jd = {'txpk':{}}\r\n\r\n for key in self.keys:\r\n val = getattr(self, key)\r\n\r\n if val is not None:\r\n if key == 'data':\r\n jd['txpk'][key] = val.decode('utf-8')\r\n else:\r\n jd['txpk'][key] = val\r\n #print('key',key)\r\n #print('valtype',type(val),val) \r\n #print(jd)\r\n \r\n return dumps(jd, separators=(',', ':'))", "def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())", "def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())", "def serialize(self):", "def serialize_forstorage(cls, obj):\n return misc.serialize_forstorage(obj)", "def ToBytes(self, value) -> bytes:\n pass", "def encode(Value):\n return base64.b64encode(zlib.compress(pickle.dumps(Value),9))", "def _store(self):\n store_dict = {}\n for key, val in self._data.items():\n store_dict[key] = pickle.dumps(val, protocol=self.v_protocol)\n store_dict[PickleResult.PROTOCOL] = self.v_protocol\n return store_dict", "def serialize(self, obj):\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def unserialize(val):\n return pickle.loads(val)", "def _serialize(self, state, handle):\n raise NotImplementedError", "def serialize_key(key: str) -> bytes:\n return key.encode(\"utf-8\")", "def to_bytes(self):\n pref = Utf8String(self.prefix)\n string = MultiByteInt31(self.index)\n\n bytes = (super(DictionaryElementRecord, self).to_bytes() +\n pref.to_bytes() +\n string.to_bytes())\n\n for attr in self.attributes:\n bytes += attr.to_bytes()\n return bytes", "def test_binary_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob", "def _serialize_buffer(buffer, array_serialization=None):\n if array_serialization == 'binary':\n # WARNING: in NumPy 1.9, tostring() has been renamed to tobytes()\n # but tostring() is still here for now for backward compatibility.\n return buffer.ravel().tostring()\n elif array_serialization == 'base64':\n return {'storage_type': 'base64',\n 'buffer': 
base64.b64encode(buffer).decode('ascii')\n }\n raise ValueError(\"The array serialization method should be 'binary' or \"\n \"'base64'.\")", "def encode_map(value: list) -> bytes:\n raise NotImplementedError()", "def __bytes__(self):\n return bytes([self.type * 2, len(self.value)]) + bytes(self.value, 'utf-8')", "def __bytes__(self):\n return bytes([self.type * 2, len(self.value)]) + bytes(self.value, 'utf-8')", "def __bytes__(self):\n return bytes([self.type * 2, len(self.value)]) + bytes(self.value, 'utf-8')", "def _encode_data_dict(self, data_dict: dict) -> dict:\n\n # If we have _BYTES_DATA_KEY or _FS_CHECKPOINT_KEY in the data dict,\n # that means this is a directory checkpoint which has already been\n # converted into bytes. We don't want to double-encode it.\n # See the definition of super().__getstate__().\n if _BYTES_DATA_KEY in data_dict or _FS_CHECKPOINT_KEY in data_dict:\n return data_dict\n\n for k, v in data_dict.items():\n # Only check for attribute as we want to support\n # DDP, FSDP and any future approaches\n if isinstance(v, Module) and hasattr(v, \"module\"):\n data_dict[k] = v.module\n elif isinstance(v, dict):\n # We could limit this only to the MODEL_KEY, but we'd\n # miss any extra user-specified keys. This should be a\n # noop with anything but DDP/FSDP module state dicts.\n data_dict[k] = consume_prefix_in_state_dict_if_present_not_in_place(\n v, \"module.\"\n )\n\n # Convert the checkpoint dict to bytes, so that any GPU tensors that\n # are in the checkpoint dict can be properly deserialized on the\n # driver side, even if the driver does not have access to a GPU device.\n _buffer = io.BytesIO()\n torch.save(\n data_dict,\n _buffer,\n pickle_module=ray_pickle,\n pickle_protocol=pickle.HIGHEST_PROTOCOL\n # Using pickle.HIGHEST_PROTOCOL here because it's 5 for Python 3.8+,\n # but 4 for 3.7. 
For backward compatibility, we are not using\n # ray.cloudpickle because its default protocol is always 5.\n )\n return {ENCODED_DATA_KEY: _buffer.getvalue()}", "def serialize_to_signature(cls, value):\n return deepcopy(value)", "def serialize(self):\n pass", "def to_binary(self):\n c = containerize(exclude_fields(self))\n self.payload = MsgEd25519SignatureDepB._parser.build(c)\n return self.pack()", "def toBytes(self):\n return self.toJson().encode()", "def toBytes(self):\n return self.toJson().encode()", "def toBytes(self):\n return self.toJson().encode()", "def _to_packed(self, value):\n raise NotImplementedError", "def referent_to_bytes(referent):\n return pickle.dumps({\n 'left': referent.left_ref.address,\n 'key': referent.key,\n 'value': referent.value_ref.address,\n 'right': referent.right_ref.address,\n 'color': referent.color\n })", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.state_path\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.state_class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.initial_state_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(self.input_keys)\n buff.write(_struct_I.pack(length))\n for val1 in self.input_keys:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.output_keys)\n buff.write(_struct_I.pack(length))\n for val1 in self.output_keys:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.cond_outcome)\n buff.write(_struct_I.pack(length))\n for val1 in self.cond_outcome:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.cond_transition)\n buff.write(_struct_I.pack(length))\n for val1 in self.cond_transition:\n length = len(val1.state_name)\n buff.write(_struct_I.pack(length))\n for val2 in val1.state_name:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val2))\n length = len(val1.state_outcome)\n buff.write(_struct_I.pack(length))\n for val2 in val1.state_outcome:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val2))\n _x = self.behavior_class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(self.parameter_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.parameter_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.parameter_values)\n buff.write(_struct_I.pack(length))\n for val1 in self.parameter_values:\n length = 
len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n buff.write(self.position.tostring())\n length = len(self.outcomes)\n buff.write(_struct_I.pack(length))\n for val1 in self.outcomes:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.transitions)\n buff.write(_struct_I.pack(length))\n for val1 in self.transitions:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.autonomy)\n buff.write(_struct_I.pack(length))\n pattern = '<%sb'%length\n buff.write(self.autonomy.tostring())\n length = len(self.userdata_keys)\n buff.write(_struct_I.pack(length))\n for val1 in self.userdata_keys:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.userdata_remapping)\n buff.write(_struct_I.pack(length))\n for val1 in self.userdata_remapping:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def value(self) -> Any:\n return pickle.loads(self.pickled_value)", "def encode(key, value, ber_length=0):\n return bytearray(key) + encode_ber(len(value), ber_length) + bytearray(value)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def serialize(self):\n return {\n \"key\": self.key,\n \"value\": self.value\n }", "def _to_serialize(value):\n return value.serialize() if value is not None else None", "def pickle(obj):\n return pickletools.optimize(cPickle.dumps(obj))", "def _encode(self) -> bytearray:\n return bytearray((self.tp,))", "def serialize_to_python(cls, value):\n raise NotImplementedError", "def serialize_data(self, app) -> dict:", "def serialize(obj):\n return serialization_manager.serialize(obj)", "def to_binary(self):\n c = containerize(exclude_fields(self))\n self.payload = MsgEcdsaSignatureDepB._parser.build(c)\n return self.pack()", "def objToPickle(self, x):\n try:\n xp = pickle.dumps(x)\n pickle.loads(xp)\n except:\n return\n return xp", "def serialize(self, obj):\n return dill.dumps(obj, 0).decode('latin-1')", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def data(self):\n retval = copy.deepcopy(self.__dict__)\n retval[\"_Serializable_classname\"] = type(self).__name__\n retval[\"_Serializable_version\"] = \"1.0\"\n return retval", "def _serialize_data(self) -> Tuple[np.ndarray, np.ndarray]:\n\n def _serialize(data):\n buffer = pickle.dumps(data, protocol=4)\n return np.frombuffer(buffer, dtype=np.uint8)\n\n serialized_data_infos_list = [_serialize(x) for x in self.data_infos]\n address_list = np.asarray([len(x) for x in serialized_data_infos_list],\n dtype=np.int64)\n 
data_address: np.ndarray = np.cumsum(address_list)\n serialized_data_infos = np.concatenate(serialized_data_infos_list)\n\n return serialized_data_infos, data_address", "def test_byte_array_conversion():\n ob = ConversionTest()\n\n assert ob.ByteArrayField is None\n\n ob.ByteArrayField = [0, 1, 2, 3, 4]\n array = ob.ByteArrayField\n assert len(array) == 5\n assert array[0] == 0\n assert array[4] == 4\n\n value = b\"testing\"\n ob.ByteArrayField = value\n array = ob.ByteArrayField\n for i, _ in enumerate(value):\n assert array[i] == operator.getitem(value, i)", "def to_binary(self):\n c = containerize(exclude_fields(self))\n self.payload = MsgEd25519SignatureDepA._parser.build(c)\n return self.pack()", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_h16fh8f().pack(_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def save(self, obj):\n if isinstance(obj, self.np.ndarray):\n # Compute a hash of the object:\n try:\n self._hash.update(self.np.getbuffer(obj))\n except TypeError:\n # Cater for non-single-segment arrays: this creates a\n # copy, and thus aleviates this issue.\n # XXX: There might be a more efficient way of doing this\n self._hash.update(self.np.getbuffer(obj.flatten()))\n\n # We store the class, to be able to distinguish between\n # Objects with the same binary content, but different\n # classes.\n if self.coerce_mmap and isinstance(obj, self.np.memmap):\n # We don't make the difference between memmap and\n # normal ndarrays, to be able to reload previously\n # computed results with memmap.\n klass = self.np.ndarray\n else:\n klass = obj.__class__\n # We also return the dtype and the shape, to distinguish\n # different views on the same data with different dtypes.\n\n # The object will be pickled by the pickler hashed at the end.\n obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))\n Hasher.save(self, obj)", "def persistence_serialize(self):\n raise NotImplementedError", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_h16fh8f().pack(_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y))\n except struct.error as se: 
self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def __bytes__(self):\n return bytes(bytearray([int(self)]))", "def _dumpKey(self, key):\n return self.serializer.dumpKey((self.path, self._internalNs, key))", "def encode_any(value: object) -> bytes:\n raise NotImplementedError()", "def encode_byte_array(value: bytes) -> bytes:\n return bytes([]) if isinstance(value, type(None)) else value", "def serialize(self):\n return {\n\n\n }", "def __marshallable__(self):\n return dict(self.__dict__)['_data']", "def serialize(self, data):\n raise NotImplementedError", "def to_binary(self):\n c = containerize(exclude_fields(self))\n self.payload = MsgEcdsaSignature._parser.build(c)\n return self.pack()", "def encode(self, *stuff):\n if self._kv_fmt:\n result = self._encode_wire(stuff[0])\n else:\n result = self._encode_wire(stuff)\n return result.getvalue()" ]
[ "0.64888334", "0.6360432", "0.6315611", "0.6277576", "0.6177769", "0.6171004", "0.6164912", "0.6144653", "0.6144653", "0.61439574", "0.6126626", "0.6126626", "0.60924256", "0.60020936", "0.59987134", "0.59793663", "0.5974535", "0.59639865", "0.5963505", "0.5941758", "0.59316015", "0.5917161", "0.5906177", "0.58746827", "0.58746827", "0.58694506", "0.5840877", "0.5814872", "0.5797925", "0.57812107", "0.57807827", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.5768339", "0.57672", "0.5738685", "0.57341355", "0.57270616", "0.5702393", "0.5693254", "0.56799626", "0.5672603", "0.5672603", "0.5672603", "0.566388", "0.56523883", "0.56254625", "0.5617157", "0.5611244", "0.5611244", "0.5611244", "0.5608137", "0.5600587", "0.5589029", "0.5584839", "0.5581749", "0.5578323", "0.55755347", "0.55742157", "0.5567761", "0.5562246", "0.55538267", "0.55382526", "0.55287385", "0.5525205", "0.5499647", "0.54943705", "0.5486887", "0.5486887", "0.5486887", "0.5485654", "0.5485461", "0.54757965", "0.54723775", "0.5469531", "0.5456491", "0.5448452", "0.5443459", "0.54385364", "0.5422929", "0.54227847", "0.54084224", "0.5407549", "0.53878224", "0.53863317", "0.537874", "0.5367234" ]
0.7145172
0
return None if parsing failed
def deserialize(file_io):
    header = file_io.read(8)
    if len(header) != 8:
        return None
    # parsing header
    header = byte_utils.byte_array_to_integer(header)
    type = KeyType.PUT if (header & (1 << 63)) else KeyType.DELETE
    sequence_number = header & ((1 << 63) - 1)
    # parsing key and value
    key_size = file_io.read(4)
    if len(key_size) != 4:
        return None
    key_size = byte_utils.byte_array_to_integer(key_size)
    key_byte_array = file_io.read(key_size)
    if len(key_byte_array) != key_size:
        return None
    if type == KeyType.PUT:
        value_size = file_io.read(4)
        if len(value_size) != 4:
            return None
        value_size = byte_utils.byte_array_to_integer(value_size)
        value_byte_array = file_io.read(value_size)
        if len(value_byte_array) != value_size:
            return None
        key, value = pickle.loads(key_byte_array), pickle.loads(value_byte_array)
        return InternalKeyValue(
            key=key, sequence_number=sequence_number, type=type, value=value
        )
    else:
        key = pickle.loads(key_byte_array)
        return InternalKeyValue(key=key, sequence_number=sequence_number, type=type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse(self):\n pass", "def test_simple_parse(self):\n pass", "def parse(self) -> None:\n pass", "def parse(self):", "def parse(self, text):\n node = self.match(text)\n if node is None or node.end - node.start != len(text): # TODO: Why not test just end here? Are we going to add a pos kwarg or something?\n # If it was not a complete parse, return None:\n return None\n return node", "def parse(self):\n pass", "def parse(self):\n pass", "def parse(self):\n pass", "def parse(self):\n pass", "def parse(self, input):\n pass", "def parse(self):\n raise NotImplementedError", "def parse(cls, s):\n raise NotImplementedError", "def parse(s):\n return s", "def try_parse(blob, filename=None):\n ret = None\n\n for parser in [location_csv.blob_to_dict, gpx_parser.blob_to_dict]:\n try:\n ret = parser(blob)\n if ret:\n logging.debug(\n \"try_pares -> Got return for: {}, returning!\".format(\n parser.__doc__))\n return ret\n except TypeError as e:\n logging.debug(\"Failed parsing with parser: {} -> {}\".format(\n parser.__doc__, e))\n\n return None", "def test_parse(self): \n\n results = self.parser.parse()\n self.assertEqual(results, test_case_data['parse_output'])", "def parse(token):\n\n pass", "def parse(self):\n raise NotImplementedError(\"Parse not specified!\")", "def _try_parse(self, *parse_funcs: ParseFunc) -> Optional[node.NodeType]:\n for parse_func in parse_funcs:\n try:\n with self.tokens:\n return parse_func()\n except ParserException:\n pass\n return None", "def parse(self, fstring):\n pass", "def parse_string(self, data):\n pass", "def parse(data):\n parser=Parser(data, True)\n return parser.parse()", "def _parse(self, infile):\n raise NotImplementedError()", "def parseString(self, s):\n pass", "def main():\n\ttest() #test ParseError", "def haiku_string_parser():\n pass", "def parse(t):\n return t", "def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')", "def parse(cls, line):\r\n raise NotImplementedError", "def test_get_parse_not_empty(self):\n \n self.assertEqual(bitchangesets.parse_changeset(self.changeset), {'timestamp': '2013-07-27 01:56:46', 'parsed_author': 'David Leonard'})", "def _parse_error(self, str_as_xml):\n try:\n xmlDocument = minidom.parseString(str_as_xml)\n if len(xmlDocument.getElementsByTagName(\"error\")) > 0:\n error = xmlDocument.getElementsByTagName(\"message\")\n if error:\n error = error[0]\n return error.childNodes[0].nodeValue\n return None\n except Exception, detail:\n raise OAuthError(\"Invalid XML String given: error: %s\" % repr(detail))", "def parse(self, infile):\r\n raise NotImplementedError()", "def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")", "def parse(self, data):\n raise NotImplementedError", "def test_parse_devide(self):\n self.assertEqual(parse_input.parse([\"8\", \"/\", \"4\"]), 2)", "def parse(self, content):\n pass", "def parse(year):\n if year in RESULTS_FILES and year in PARSERS and PARSERS[year]:\n parser = PARSERS[year]\n return parser(year, RESULTS_FILES[year])\n return None", "def _ast_node_or_parse_exception(self):\n # This attribute may also be set by __construct_from_annotated_ast(),\n # in which case this code does not run.\n try:\n return _parse_ast_nodes(\n self.text, self._input_flags, self._auto_flags, \"exec\")\n except Exception as e:\n # Add the filename to the exception message to be nicer.\n if self.text.filename:\n try:\n e = type(e)(\"While parsing %s: %s\" % (self.text.filename, e))\n except TypeError:\n # Exception takes more 
than one argument\n pass\n # Cache the exception to avoid re-attempting while debugging.\n return e", "def _parse_error(self, error):\n error = str(error)\n # Nvidia\n # 0(7): error C1008: undefined variable \"MV\"\n m = re.match(r'(\\d+)\\((\\d+)\\)\\s*:\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # ATI / Intel\n # ERROR: 0:131: '{' : syntax error parse error\n m = re.match(r'ERROR:\\s(\\d+):(\\d+):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # Nouveau\n # 0:28(16): error: syntax error, unexpected ')', expecting '('\n m = re.match(r'(\\d+):(\\d+)\\((\\d+)\\):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(4)\n # Other ...\n return None, error", "def on_parse(\n self,\n ) -> AsyncIteratorOrIterator[None]: # pragma: no cover # pyright: ignore\n yield None", "def test_fetchParserIncompleteStringEndsInWhitespace(self):\n p = imap4._FetchParser()\n self.assertRaises(Exception, p.parseString, b\"BODY[HEADER.FIELDS \")", "def _validate_parse_result(self, result: 'TypeParserResult'):\n is_valid = result.get_implementation() is not None\n if not is_valid:\n raise AInixParseError(f\"{self} did not set a valid implementation.\")", "def test_parse_none_job(self):\n\n res = sf_c.parse_job(None)\n self.assertIs(res, None)", "def _parse(line: str) -> Tuple[int, str, str, bool]:\n level, tag, args = None, None, None\n try:\n # Try to split the line into three parts, then try two parts\n try:\n level, tag, args = line.split(' ', maxsplit=2)\n except ValueError:\n try:\n level, tag = line.split(' ', maxsplit=1)\n except ValueError:\n raise InvalidLineException(f'Invalid line: {line}')\n\n # INDI nor FAM should not be the tag\n if tag in ('INDI', 'FAM'):\n raise InvalidLineException(f'Invalid tag {line}')\n\n # If INDI or FAM are args, swap the args and tag\n if args in ('INDI', 'FAM'):\n temp = args\n args = tag\n tag = temp\n\n # Level must be between 0 and 2 inclusive\n if level not in ('0', '1', '2'):\n raise InvalidLineException(f'Invalid level: {level}')\n level = int(level)\n\n # Allow a trailing space\n if args == '':\n args = None\n\n if tag == 'INDI':\n assert level == 0\n assert args is not None\n elif tag == 'NAME':\n assert level == 1\n assert args is not None\n elif tag == 'SEX':\n assert level == 1\n assert args in ('M', 'F')\n elif tag == 'BIRT':\n assert level == 1\n assert args is None\n elif tag == 'DEAT':\n assert level == 1\n assert args is None\n elif tag == 'FAMC':\n assert level == 1\n assert args is not None\n elif tag == 'FAMS':\n assert level == 1\n assert args is not None\n elif tag == 'FAM':\n assert level == 0\n assert args is not None\n elif tag == 'MARR':\n assert level == 1\n assert args is None\n elif tag == 'HUSB':\n assert level == 1\n assert args is not None\n elif tag == 'WIFE':\n assert level == 1\n assert args is not None\n elif tag == 'CHIL':\n assert level == 1\n assert args is not None\n elif tag == 'DIV':\n assert level == 1\n assert args is None\n elif tag == 'DATE':\n assert level == 2\n assert args is not None\n # Date must have valid args\n try:\n day, month, year = args.split(' ', maxsplit=2)\n except ValueError:\n raise InvalidLineException(f'Not enough values for DATE: {args}')\n assert day == '0' or not day[0] == '0' and day.isdigit()\n assert month in ('JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC')\n assert len(year) == 4 and year.isdigit()\n elif tag == 'HEAD':\n assert level == 0\n assert args is None\n elif tag == 'TRLR':\n assert level == 0\n assert args is None\n 
elif tag == 'NOTE':\n assert level == 0\n else:\n raise InvalidLineException(f'Unknown Tag: {tag}')\n except (AssertionError, InvalidLineException):\n return level, tag, args, False\n\n return level, tag, args, True", "def checkParse(self, epytext, xml=None):\n errors = []\n out = parse(epytext, errors)\n if out is None: out = ''\n else: out = out.childNodes[0].toxml().strip()\n if out[:9] == '<epytext>' and out[-10:] == '</epytext>':\n out = out[9:-10]\n \n self.failIfParseError(epytext, errors)\n if xml:\n self.failUnlessEqual(`out`, `xml.strip()`)", "def parse(self): \n pass", "def postparse(self, parse_result):\n return parse_result", "def parse_data(fp):\n pass", "def Parse(self, ctx:Context, resp:Response)->Generator[Any,None,None]:\n yield Error(msg=\"Parse function not implemented\", code=Unimplemented)", "def test_fetchParserEmptyString(self):\n p = imap4._FetchParser()\n p.parseString(b'')\n self.assertFalse(len(p.result))", "def parse(cls, data):\n raise NotImplementedError", "def parse_from_tree(self, parse):\n pass", "def run(self, parsed):", "def test_140710_nogeom(self):\n with self.assertRaises(Exception):\n _ = parser(get_file('PTSDY2_nogeom.txt'))", "def test_fetchParserExpectedWhitespace(self):\n p = imap4._FetchParser()\n self.assertRaises(Exception, p.parseString, b\"BODY[HEADER.FIELDS!]\")", "def token_parse(tokens):\n\n if len(tokens) < 4:\n return None\n else:\n name = tokens[0]\n el_type = tokens[0][0]\n n1 = tokens[1]\n n2 = tokens[2]\n form = None\n freq = None\n n3 = None\n n4 = None\n v_cont = None\n value = None\n imp = None\n phase = None\n \n if not (n1.isalnum() and n2.isalnum()): # node names alphanumeric\n print(\"ERR: Node names must be alphanumeric:\", n1, n2)\n return None\n\n if n1 == n2:\n print(\"ERR: Connecting nodes must be distinct: \", tokens)\n return None\n \n if tokens[0][0] in 'RLC' and len(tokens)==4:\n value = tokens[3]\n if tokens[0][0] == 'R':\n imp = value\n\n if n1 == 'GND':\n n1,n2 = n2,n1\n\n elif tokens[0][0] in 'VI' and len(tokens)==5 and tokens[3]=='dc':\n form = 'dc'\n value = tokens[4]\n\n elif tokens[0][0] in 'VI' and len(tokens)==6 and tokens[3]=='ac':\n form = 'ac'\n value = tokens[4]\n phase = tokens[5]\n\n elif tokens[0][0] in 'EG' and len(tokens)==6:\n n3 = tokens[3]\n n4 = tokens[4]\n if not (n3.isalnum() and n4.isalnum()): # node names alphanumeric\n print(\"ERR: Node names must be alphanumeric:\", n3, n4)\n return None\n if n3 == n4:\n print(\"ERR: Voltage-controlled source received same nodes for control:\", n3, n4)\n value = tokens[5]\n\n elif tokens[0][0] in 'HF' and len(tokens)==5:\n v_cont = tokens[3]\n value = tokens[4]\n\n else: # Invalid line\n print(\"ERR: Invalid line tokens:\", tokens)\n return None\n \n quants = [freq, value, phase, imp]\n for ind in range(len(quants)):\n q = quants[ind]\n e_q = None\n\n if q != None:\n e_q = get_quant(q)\n if e_q == None:\n print(\"ERR: Invalid quantity: \", q)\n return None\n \n quants[ind] = e_q\n\n freq, value, phase, imp = quants\n\n if tokens[3] == 'ac':\n value /= 2.0 # amplitude = Vpp/2\n \n # Construct object with element data for future use\n params = [name, el_type, n1, n2, form, freq, n3, n4, v_cont, value, imp, phase]\n return Element(params)", "def run_parsing(self):\n\n if self.version:\n print(f'\"{VERSION}\"')\n return VERSION\n elif self.limit is not None and self.limit <= 0:\n print(\"Limit must be greater than 0!\")\n return \"Limit must be greater than 0!\"\n elif self.date:\n if len(str(self.date)) != 8:\n print(\"Wrong date format!\")\n else:\n 
self.print_if_verbose(\n f\"Method 'run_parsing' is working: \\n\"\n f\"'run_parsing' method calls 'get_content_from_cache' method: \\n\")\n self.rss_feed = self.get_content_from_cache()\n if self.rss_feed:\n if self.json:\n self.print_if_verbose(f\"'run_parsing' method calls 'print_json_content' method: \\n\")\n self.print_json_content(self.rss_feed)\n else:\n self.print_if_verbose(f\"'run_parsing' method calls 'print_content_from_cache' method: \\n\")\n self.print_content_from_cache(self.rss_feed)\n\n if self.to_html_path:\n self.print_if_verbose(f\"'run_parsing' method calls 'save_to_html' method: \\n\")\n self.save_to_html(self.rss_feed)\n\n if self.to_fb2_path:\n self.print_if_verbose(f\"'run_parsing' method calls 'save_to_fb2' method: \\n\")\n self.save_to_fb2(self.rss_feed)\n\n else:\n self.print_if_verbose(\n f\"Method 'run_parsing' is working: \\n\"\n f\"'run_parsing' method calls 'get_content' method: \\n\")\n self.content = self.get_content()\n\n if self.content:\n self.print_if_verbose(f\"'run_parsing' method calls 'process_content' method: \\n\")\n self.rss_feed = self.process_content(self.content)\n\n self.print_if_verbose(f\"'run_parsing' method calls 'save_news_to_cache' method: \\n\")\n self.save_news_to_cache(self.rss_feed)\n\n if self.json:\n self.print_if_verbose(f\"'run_parsing' method calls 'print_json_content' method: \\n\")\n self.print_json_content(self.rss_feed)\n else:\n self.print_if_verbose(f\"'run_parsing' method calls 'print_content' method: \\n\")\n self.print_content(self.rss_feed)\n\n if self.to_html_path:\n self.print_if_verbose(f\"'run_parsing' method calls 'save_to_html' method: \\n\")\n self.save_to_html(self.rss_feed)\n\n if self.to_fb2_path:\n self.print_if_verbose(f\"'run_parsing' method calls 'save_to_fb2' method: \\n\")\n self.save_to_fb2(self.rss_feed)\n\n self.print_if_verbose(f\"Program execution completed!\")\n\n return \"Program execution completed!\"", "def simple_parse(self, text_or_tokens):\n parses = self.parse(text_or_tokens)\n return str(parses[0].ptb_parse)", "async def parse(self, raw: str) -> dict:", "def str2optional(parser: Callable[[str], Any]) -> Callable[[str], Optional[Any]]:\n\n def _parse(string: str) -> Optional[Any]:\n if string.lower() == \"none\":\n return None\n return parser(string)\n\n return _parse", "def parse_file(self, file_name: str):\n if not os.path.exists(file_name):\n log.error('File {} does not exist'.format(file_name))\n return None\n try:\n with open(file_name) as file:\n file_content = file.readlines()\n except Exception as ex:\n log.error('Failed to read file {}: {}'.format(file_name, str(ex)))\n return None\n return self.parse_from_string(''.join(file_content))", "def test_parse(self):\n report = (\n \"KJFK 032151Z 16008KT 10SM FEW034 FEW130 BKN250 27/23 A3013 RMK AO2 SLP201\"\n )\n data, units = metar.parse(report[:4], report)\n self.assertIsInstance(data, structs.MetarData)\n self.assertIsInstance(units, structs.Units)\n self.assertEqual(data.raw, report)", "def test_make_tool_plugin_parse_invalid():\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n output = \"invalid text\"\n issues = mtp.parse_output(package, output)\n assert not issues", "def parse_play(play):\n return None", "def test_empty(self):\n self.assertRaises(ParseException, self.flag.parseString, '')", "def test_make_tool_plugin_parse_valid():\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 
'valid_package'))\n output = \"valid_package/hello.c:7:3: error: expected ; before return\"\n issues = mtp.parse_output(package, output)\n assert len(issues) == 1\n assert issues[0].filename == 'valid_package/hello.c'\n assert issues[0].line_number == '7'\n assert issues[0].severity == 5\n assert issues[0].message == \"expected ; before return\"", "def parse(self, word):\n raise NotImplementedError", "def test_parse_empty_file(self):\n bin.parser.parse_file(None, self.mock_db, self.tf, False)", "def _parse_othersymbol(line):\n return None", "def test_140709_nogeoms(self):\n with self.assertRaises(Exception):\n _ = parser(get_file('PTSDY3_nogeoms.txt'))", "def _parser(data=\"\"):\r\n\r\n if data == \"\":\r\n pass\r\n \r\n \"\"\"Retrieve timestamp\"\"\"\r\n validate(data[:25])\r\n \"\"\"retrieve log data\"\"\"\r\n log_data = re.findall(r\"[\\w]+\", data)\r\n \"\"\"Retrieve user info\"\"\"\r\n login_info = [log_data[i] for i in (7, 9, 11, 13, 15)]\r\n\r\n \r\n return login_info, log_data", "def mock_parser_fcn(s):", "def test_parse_task_time(self):\n ret = parse_task_time(\"asdf:adsf\")\n self.assertEqual(None, ret)\n\n ret = parse_task_time('\\n')\n self.assertEqual(None, ret)", "def test_parse_malformed_url(self):\r\n url = u'http://whttp://lucumr.pocoo.org/2012/8/5/stateless-and-proud/'\r\n read = readable.ReadUrl.parse(url)\r\n self.assertEqual(read.status, 901)", "def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == \"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. 
Do that here.\n return False\n\n return parsed_data", "def parse_or_reject_line(line):\n try:\n return convert_to_parkme_format(line)\n except RateCardParsingException:\n return False", "def _parse_response(self, response):\n if response is not None:\n return response.string\n return response", "def test_empty(self):\n record = ''\n\n self.assertRaises(ParseException, self.grammar.parseString, record)", "def _parse_file_line(line):\n # match initially added ebuilds\n match = ebuild_ADM_regex.match(line)\n if match:\n status = match.group('status')\n category = match.group('category')\n pkg = match.group('P')\n try:\n return atom_cls(f'={category}/{pkg}'), status\n except MalformedAtom:\n return None\n\n # match renamed ebuilds\n match = ebuild_R_regex.match(line)\n if match:\n status = match.group('status')\n category = match.group('category')\n pkg = match.group('P')\n try:\n return atom_cls(f'={category}/{pkg}'), status\n except MalformedAtom:\n return None", "def __parse(self) -> object:\r\n char = self.data[self.idx: self.idx + 1]\r\n if char in [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'0']:\r\n str_len = int(self.__read_to(b':'))\r\n return self.__read(str_len)\r\n elif char == b'i':\r\n self.idx += 1\r\n return int(self.__read_to(b'e'))\r\n elif char == b'd':\r\n return self.__parse_dict()\r\n elif char == b'l':\r\n return self.__parse_list()\r\n elif char == b'':\r\n raise DecodingError('Unexpected End of File at index position of {0}.'.format(str(self.idx)))\r\n else:\r\n raise DecodingError('Invalid token character ({0}) at position {1}.'.format(str(char), str(self.idx)))", "def parse(self, text, start=None):\n return self.parser.parse(text, start=start)", "def parse(string):\n if string.strip() == Parser.OK_MSG or string.startswith(Parser.NOT_OK_MSG):\n return Parser._handle_ok_ack(string)\n results = Parser._handle_dict(string)\n results.extend(Parser._handle_else(string))\n return results", "def test_fetchParserUnknownSection(self):\n p = imap4._FetchParser()\n self.assertRaises(Exception, p.parseString, b\"BODY[UNKNOWN]\")", "def parse_line(self, line):\n raise NotImplementedError", "def parse(self, handle):\n results = handle.read()\n\n try:\n self._scanner.feed(File.StringHandle(results), self._consumer)\n except ValueError, msg:\n # if we have a bad_report_file, save the info to it first\n if self._bad_report_handle:\n # send the info to the error handle\n self._bad_report_handle.write(results)\n\n # now we want to try and diagnose the error\n self._diagnose_error(\n File.StringHandle(results), self._consumer.data)\n\n # if we got here we can't figure out the problem\n # so we should pass along the syntax error we got\n raise\n return self._consumer.data", "def get_parse_error(code):\r\n # note that this uses non-public elements from stdlib's tabnanny, because tabnanny\r\n # is (very frustratingly) written only to be used as a script, but using it that way\r\n # in this context requires writing temporarily files, running subprocesses, blah blah blah\r\n code_buffer = StringIO(code)\r\n try:\r\n tabnanny.process_tokens(tokenize.generate_tokens(code_buffer.readline))\r\n except tokenize.TokenError, err:\r\n return \"Could not parse code: %s\" % err\r\n except IndentationError, err:\r\n return \"Indentation error: %s\" % err\r\n except tabnanny.NannyNag, err:\r\n return \"Ambiguous tab at line %d; line is '%s'.\" % (err.get_lineno(), err.get_line())\r\n return None", "def _parse_data(data: str) -> Tuple[str, str, str, int, int, int, str]:\n\n phg = None\n rng = 
None\n dfs = None\n course = None\n speed = None\n altitude = None\n comment = None\n\n if re.match(r'^PHG[0-9]{4}', data[:7]):\n # Packet has a PHG (power, antenna height/gain/directivity) value\n phg = data[3:7]\n logger.debug(\"PHG is {}\".format(phg))\n data = data[7:]\n\n elif re.match('^RNG[0-9]{4}', data[:7]):\n # Packet has an RNG (radio range) value\n rng = data[3:7]\n logger.debug(\"RNG is {}\".format(rng))\n data = data[7:]\n\n elif re.match('^DFS[0-9]{4}', data[:7]):\n # Packet has a DFS (DF signal strength, antenna height/gain/directivity) value\n dfs = data[3:7]\n logger.debug(\"DFS is {}\".format(dfs))\n data = data[7:]\n\n elif re.match('^[0-9]{3}/[0-9]{3}', data[:7]):\n # Packet has course and speed values\n course = int(data[:3])\n speed = int(data[4:7])\n logger.debug(\"Course is {}, speed is {}\".format(course, speed))\n data = data[7:]\n\n # TODO - parse BRG/NRQ\n\n # Check for comment\n if len(data) > 0:\n\n # Check for altitude\n # As per APRS 1.01 C6 P26, altitude as /A=nnnnnn may appear anywhere in the comment\n has_altitude = re.match('.*/A=([0-9]{6}).*', data)\n if has_altitude:\n # TODO - fix altitude format\n altitude = int(has_altitude.groups()[0])\n logger.debug(\"Altitude is {} ft\".format(altitude))\n\n # Strip out the altitude from the comment\n data = re.sub(r'/A=[0-9]{6}', \"\", data)\n\n # Set the comment as the remainder of the information field\n comment = data\n logger.debug(\"Comment is {}\".format(comment))\n\n return (phg, rng, dfs, course, speed, altitude, comment)", "def parse_data(self):\n\t\traise NotImplementedError('%s: No parse function implemented!' % self.name)", "def test_empty():\n assert afos_dump.real_parser(None, \"\") is None", "def __parse_market_data__(self, market_data_str):\n\t\t\"\"\"return an MarketData instance if format correct, return None otherwise\"\"\"\n\t\tOMSLogger.debug(\"Parsing market data - {0}\".format(market_data_str))\n\t\tresult = re.match(\".*var hq_str_(.+)=(.*)\", market_data_str)\n\t\tif result is not None:\t\t\t\n\t\t\tmarket_data = MarketData(result.group(1))\n\t\t\tvalue_list = string.split(result.group(2), \",\")\t\n\t\t\tmarket_data.name = value_list[0]\n\t\t\tif market_data.symbol.startswith(ProductPrefix.FUND_PREFIX):#mutual fund, get net value\n\t\t\t\tmarket_data.latest_net_value = float(value_list[1])\n\t\t\t\tmarket_data.cum_net_value = float(value_list[2])\n\t\t\t\tmarket_data.last_net_value = float(value_list[3])\n\t\t\t\tmarket_data.date, market_data.time = value_list[4:6]\n\t\t\tif market_data.symbol.startswith(ProductPrefix.SZ_PREFIX):#listed fund, get trade price\t\t\t\n\t\t\t\t#TODO self.op, self.lcp, self.tp, self.hp, self.lp, self.bbp, self.bap, self.quantity, self.notional = value_list[0:10]\n\t\t\t\tmarket_data.tp = float(value_list[3])\n\t\t\t\tmarket_data.level2 = value_list[10:30]\n\t\t\t\tmarket_data.date, market_data.time = value_list[30:32]\n\t\t\t\n\t\t\tOMSLogger.debug(\"Successfully parsed market data!\")\n\t\t\treturn market_data\n\t\tOMSLogger.error(\"Passed in market data string is not well formatted\")\n\t\treturn None", "def _parse_and_validate(self, val):\n if self._is_parameter_type:\n val = self._parse(val) if isinstance(val, str) else val\n self._validate_or_throw(val)\n return val", "def parseline(self, line):\n line = line.strip()\n if not line:\n return None, None, line\n elif line[0] == '?':\n line = 'help ' + line[1:]\n elif line[0] == '!':\n if hasattr(self, 'do_shell'):\n line = 'shell ' + line[1:]\n else:\n return None, None, line\n return '', '', line", "def 
_parse_preset(self, xmldata):\r\n\r\n raise NotImplementedError", "def test_parseLine2(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"11/11/19,Brighter Futures,12000\"\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then: (Using PyTruth assertions)\n AssertThat(result).IsNone()", "def parse(self, value):\n raise NotImplementedError(\"Please implement the Class\")", "def structure_parse(source):\r\n return structure_grammar().parseString(source)", "def null(self):\n val = self.read(4)\n if val != b'null':\n self.on_parser_error(\"null token expected\")\n return null", "def test_parse_valid(self):\n mock_scraper = MockCtdScraper()\n scrape_gen = mock_scraper.scrape(TEST_CHUNKSIZE)\n self.parser.parse(next(scrape_gen))", "def parse_header(self):", "def _linux_parse(line, s):\n output_line = {}\n\n if line.startswith('PING '):\n s.ipv4 = 'bytes of data' in line\n\n if s.ipv4 and line[5] not in string.digits:\n s.hostname = True\n # fixup for missing hostname\n line = line[:5] + 'nohost' + line[5:]\n elif s.ipv4 and line[5] in string.digits:\n s.hostname = False\n elif not s.ipv4 and ' (' in line:\n s.hostname = True\n else:\n s.hostname = False\n\n if s.ipv4 and not s.hostname:\n dst_ip, dta_byts = (2, 3)\n elif s.ipv4 and s.hostname:\n dst_ip, dta_byts = (2, 3)\n elif not s.ipv4 and not s.hostname:\n dst_ip, dta_byts = (2, 3)\n else:\n dst_ip, dta_byts = (3, 4)\n\n line = line.replace('(', ' ').replace(')', ' ')\n s.destination_ip = line.split()[dst_ip].lstrip('(').rstrip(')')\n s.sent_bytes = line.split()[dta_byts]\n\n return None\n\n if line.startswith('---'):\n s.footer = True\n return None\n\n if s.footer:\n if 'packets transmitted' in line:\n if ' duplicates,' in line:\n s.packets_transmitted = line.split()[0]\n s.packets_received = line.split()[3]\n s.packet_loss_percent = line.split()[7].rstrip('%')\n s.duplicates = line.split()[5].lstrip('+')\n s.time_ms = line.split()[11].replace('ms', '')\n return None\n\n s.packets_transmitted = line.split()[0]\n s.packets_received = line.split()[3]\n s.packet_loss_percent = line.split()[5].rstrip('%')\n s.duplicates = '0'\n s.time_ms = line.split()[9].replace('ms', '')\n return None\n\n split_line = line.split(' = ')[1]\n split_line = split_line.split('/')\n output_line = {\n 'type': 'summary',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'packets_transmitted': s.packets_transmitted or None,\n 'packets_received': s.packets_received or None,\n 'packet_loss_percent': s.packet_loss_percent or None,\n 'duplicates': s.duplicates or None,\n 'time_ms': s.time_ms or None,\n 'round_trip_ms_min': split_line[0],\n 'round_trip_ms_avg': split_line[1],\n 'round_trip_ms_max': split_line[2],\n 'round_trip_ms_stddev': split_line[3].split()[0]\n }\n\n return output_line\n\n # ping response lines\n\n # request timeout\n if 'no answer yet for icmp_seq=' in line:\n timestamp = False\n isequence = 5\n\n # if timestamp option is specified, then shift icmp sequence field right by one\n if line[0] == '[':\n timestamp = True\n isequence = 6\n\n output_line = {\n 'type': 'timeout',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'timestamp': line.split()[0].lstrip('[').rstrip(']') if timestamp else None,\n 'icmp_seq': line.replace('=', ' ').split()[isequence]\n }\n\n return output_line\n\n # normal responses\n if ' bytes from ' in line:\n\n line = line.replace('(', ' ').replace(')', ' 
').replace('=', ' ')\n\n # positions of items depend on whether ipv4/ipv6 and/or ip/hostname is used\n if s.ipv4 and not s.hostname:\n bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)\n elif s.ipv4 and s.hostname:\n bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)\n elif not s.ipv4 and not s.hostname:\n bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)\n elif not s.ipv4 and s.hostname:\n bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)\n\n # if timestamp option is specified, then shift everything right by one\n timestamp = False\n if line[0] == '[':\n timestamp = True\n bts, rip, iseq, t2l, tms = (bts + 1, rip + 1, iseq + 1, t2l + 1, tms + 1)\n\n output_line = {\n 'type': 'reply',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'timestamp': line.split()[0].lstrip('[').rstrip(']') if timestamp else None,\n 'response_bytes': line.split()[bts],\n 'response_ip': line.split()[rip].rstrip(':'),\n 'icmp_seq': line.split()[iseq],\n 'ttl': line.split()[t2l],\n 'time_ms': line.split()[tms],\n 'duplicate': 'DUP!' in line\n }\n\n return output_line", "def parse(self, args):\n pass", "def __init__(self, parser: Any = None):" ]
[ "0.6728034", "0.65789205", "0.6491963", "0.64315605", "0.6299717", "0.6294331", "0.6294331", "0.6294331", "0.6294331", "0.6265237", "0.62311304", "0.60790735", "0.60173225", "0.6007963", "0.6007318", "0.6002458", "0.5985229", "0.59831566", "0.5947476", "0.5941684", "0.59364754", "0.59171015", "0.5880037", "0.58762586", "0.58424383", "0.5834772", "0.5828534", "0.5823135", "0.58082616", "0.57919157", "0.5762473", "0.57272124", "0.5709897", "0.57047504", "0.5701216", "0.5695454", "0.5686979", "0.5668893", "0.5639067", "0.56374586", "0.56374246", "0.56348246", "0.561963", "0.5610024", "0.56013125", "0.55980307", "0.55895126", "0.55874264", "0.55757326", "0.55720174", "0.5560529", "0.55597496", "0.5558939", "0.55516213", "0.5551507", "0.5545338", "0.5542744", "0.55296797", "0.5524904", "0.5514115", "0.5504662", "0.54936105", "0.54914355", "0.5486379", "0.5483968", "0.54838425", "0.5482096", "0.5480357", "0.5479674", "0.5474289", "0.5470663", "0.54629576", "0.5454492", "0.54435015", "0.5425282", "0.5423421", "0.5420906", "0.5411189", "0.5410028", "0.54095614", "0.54055196", "0.540542", "0.5404261", "0.54018515", "0.5400613", "0.53962654", "0.53945446", "0.537948", "0.5371269", "0.5363564", "0.53626466", "0.5356549", "0.5344758", "0.5339832", "0.53362596", "0.533168", "0.532636", "0.532432", "0.53188896", "0.5314327", "0.5307139" ]
0.0
-1
Import ASHRAE data from a directory containing the .csv files.
def import_data(ashrae_dir, filenames=const.NAMES):
    print('Importing data from csv')
    ashrae_dir = pathlib.Path(ashrae_dir)
    data = {name: pd.read_csv((ashrae_dir / name).with_suffix('.csv'))
            for name in filenames}
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def load_data(path):\n\n columns = ['Item Year', 'Original Value', 'Standard Value', 'Original Currency',\n 'Standard Currency', 'Orignal Measure', 'Standard Measure', 'Location',\n 'Commodity']\n col_type = [int, float, float, object, object, object, object, object]\n\n col_type_dict = dict(zip(columns, col_type))\n\n au_df = pd.read_csv(path, usecols=columns)\n au_df = au_df.astype(col_type_dict)\n au_df.name = 'AU_data'\n \n return au_df, columns", "def data_import(path):\n train_path = os.path.join(path, \"train.csv\")\n test_path = os.path.join(path, \"test.csv\")\n df_train = pd.read_csv(train_path)\n df_test = pd.read_csv(test_path)\n return df_train, df_test", "def loadCSV(input_file):", "def import_csv_data(cr, registry):\n files = ['data/sc.info.csv']\n for file in files:\n tools.convert_file(cr, 'prospects_app', file, None,\n mode='init', noupdate=True, kind='init')", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def import_ag_data(data_csv):\n df = pd.read_csv(data_csv)\n col_to_drop = ['Program', 'Period', 'Week Ending', 'Geo Level', 'State',\n 'State ANSI', 'Zip Code', 'Region', 'watershed_code',\n 'Watershed', 'Data Item', 'Domain', 'Domain Category',\n 'Ag District', 'Ag District Code', 'CV (%)']\n df = df.drop(col_to_drop, axis=1)\n df = df[(df['Value'] != ' (D)') & (df['Value'] != ' (Z)')]\n df = df.replace(to_replace=r',', value='', regex=True)\n df['Value'] = df['Value'].astype('int')\n df = df.rename(columns={'Value': 'Yield'})\n df['Year'] = pd.to_datetime(df['Year'], format='%Y')\n return df", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def from_csv(self):\n timestamp_logname = \"from_csv_\" + datetime.today().strftime('%Y_%m_%d_%H_%M_%S')\n csv_files = [f for f in self.args.files if f.endswith('.csv')]\n if not csv_files:\n self.logger.error(\"No CSV files found.\")\n return False\n\n # Create an instance of the Ingestor class with common options set.\n ingestor = Ingestor(**self.options)\n\n # Ingest from each CSV file.\n for csv_file in csv_files:\n data_groups = Ingestor.process_csv(csv_file)\n for mask, routes, deployment_number in data_groups:\n ingestor.load_queue(mask, routes, deployment_number)\n ingestor.ingest_from_queue()\n\n # Write out any failed ingestions from the entire batch to a new CSV file.\n if ingestor.failed_ingestions:\n ingestor.write_failures_to_csv(timestamp_logname)\n\n self.logger.info('')\n self.logger.info(\"Ingestion completed.\")\n return True", "def importData(filename):\r\n data = pd.read_csv(filename)\r\n return data", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n 
entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data", "def import_hydx(hydx_path):\n hydx = Hydx()\n\n hydxcsvfiles = [\n \"Debiet.csv\",\n \"ItObject.csv\",\n \"Knooppunt.csv\",\n \"Kunstwerk.csv\",\n \"Meta.csv\",\n \"Nwrw.csv\",\n \"Oppervlak.csv\",\n \"Profiel.csv\",\n \"Verbinding.csv\",\n \"Verloop.csv\",\n ]\n implementedcsvfiles = [\n \"Debiet.csv\",\n # \"ItObject1.csv\",\n \"Knooppunt.csv\",\n \"Kunstwerk.csv\",\n # \"Meta1.csv\",\n # \"Nwrw1.csv\",\n \"Oppervlak.csv\",\n \"Profiel.csv\",\n \"Verbinding.csv\",\n \"Verloop.csv\",\n ]\n\n existing_files = []\n for f in hydxcsvfiles:\n csvpath = os.path.join(hydx_path, f)\n if not os.path.isfile(csvpath):\n logger.warning(\n \"The following hydx file could not be found: %s\",\n os.path.abspath(csvpath),\n )\n elif f not in implementedcsvfiles:\n logger.warning(\n \"The following hydx file is currently not implemented in this importer: %s\",\n csvpath,\n )\n else:\n existing_files.append(f)\n\n # TODO check if number of csvfiles loaded is same as number inside meta1.csv\n\n for f in existing_files:\n csvpath = os.path.join(hydx_path, f)\n with open(csvpath, encoding=\"utf-8-sig\") as csvfile:\n csvreader = csv.DictReader(csvfile, delimiter=\";\")\n hydx.import_csvfile(csvreader, f)\n\n hydx.check_import_data()\n\n return hydx", "def load_ae(self, year):\n ae_paths = list(pathlib.Path(config.AE_DIR).glob(f'{year}*ae.txt'))\n assert len(ae_paths) == 1, (f'No AE files found.\\nae_dir={config.AE_DIR}, '\n f'year={year}, ae_paths={ae_paths}')\n ae_data = pd.read_csv(ae_paths[0], sep=' ', index_col=0, \n parse_dates=True, comment='#', \n names=['dateTime', 'AE'])\n return ae_data", "def importAll():\n csvFile = openCsv()\n items = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,\n # name, pricePerOne, subCategory, subKey, totalTradeCount,\n # mainLabel, subLabel, description\n\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n\n return items", "def load_data(path):\n filenames = glob.glob(path + \"/*\")\n \n events = DataFrame()\n data = []\n \n for f in filenames:\n data.append(pandas.read_csv(f, index_col=None, header=0))\n \n events = pandas.concat(data)\n return events", "def ImportFromCsvFile(self, csvfilename):\n stories = []\n with open(csvfilename, 'r') as content_file:\n delimiter=','\n quotechar='\"'\n if sys.version_info[0] == 2: #python2\n delimiter = delimiter.encode(\"ascii\")\n quotechar = quotechar.encode(\"ascii\")\n\n reader = csv.reader(content_file, delimiter=delimiter, quotechar=quotechar)\n headerrow = None\n for row in reader:\n if headerrow is None:\n headerrow = row\n continue\n story = Story.FromCsv(row)\n self.stories.append(story)", "def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "def ingest_rental_csv(csv_path):\n # Create a CSV import generator (next yields one db row)\n 
import_generator = import_csv_gen(csv_path)\n # Skip over the title row\n next(import_generator)\n # Iterate over all other rows\n while True:\n try:\n data = next(import_generator)\n if len(data) != 2:\n logger.error(f'Data with incorrect item count: {len(data)}')\n continue\n # extract items from list and add document to database\n with Connection():\n rental = Rental(\n product_id=data[RENTAL_PROD_ID],\n user_id=data[RENTAL_USER_ID]\n )\n rental.save() # This will perform an insert\n except StopIteration:\n break", "def import_files_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)", "def process_file_import(self):\r\n directory_csv = [file for file in os.listdir() if file.endswith(\".csv\")]\r\n self.print_options(directory_csv,2)\r\n\r\n \"\"\"\r\n Asks for user input. Then imports csv file based on user's input.\r\n \"\"\"\r\n n = (input(\"Which csv would you like to import? Please input the corresponding integer:\"))\r\n\r\n try:\r\n n = int(n)\r\n except:\r\n pass\r\n\r\n if isinstance(n, int) is True and n <= len(directory_csv):\r\n self.population.import_csv(directory_csv[int(n)-1])\r\n print(self.population)\r\n self.file_import()\r\n elif n == 'q':\r\n quit()\r\n elif n == 'b':\r\n self.menu_page()\r\n else:\r\n raise InputError(\"\\nPlease input a valid digit, 'q' or 'b'\")", "def import_data():\n import pandas as pd\n \n df = pd.read_csv('Company_Bankruptcy_Prediction.csv')\n return df", "def importAllDatasets(directory):\n head_index = findIndex(temp_list, \"Gaze\")\n point_index = findIndex(temp_list, \"Point\")\n grab_index = findIndex(temp_list, \"Grab\")\n pos_index = findIndex(temp_list, \"Position\")\n\n head_data = pd.read_csv(temp_list[head_index]) if head_index != None else None\n point_data = pd.read_csv(temp_list[point_index]) if point_index != None else None\n grab_data = pd.read_csv(temp_list[grab_index]) if grab_index != None else None\n pos_data = pd.read_csv(temp_list[pos_index]) if pos_index != None else None\n\n\n return head_data, point_data, grab_data, pos_data", "def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df", "def import_csv(self, csvfileobject):\n # Clear previously stored info\n self._tracks = []\n self._selected = None\n\n for row in csvfileobject:\n if row[0] == \"T\":\n track = self.addTrack()\n track.properties = row\n elif row[0] == \"P\":\n period = self.addPeriod([0,1,'-'])\n period.properties = row", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def import_data():\n\tif os.path.exists(\"log.csv\"):\n\t\t#print (\"--training data imported to data frame\\n\")\n\t\tdf = pd.read_csv(\"log.csv\", index_col=0)\n\telse:\n\t\tprint(\"training CSV not found\")\n\t\texit()\n\t\n\treturn df", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def _import_users(admin_access_token, users_csv_file):\n admin = User.query.filter_by(id_=ADMIN_USER_ID).one_or_none()\n if admin_access_token != admin.access_token:\n raise ValueError(\"Admin access token invalid.\")\n csv_reader = csv.reader(users_csv_file)\n for row in csv_reader:\n user = User(\n id_=row[0],\n email=row[1],\n access_token=row[2],\n username=row[3],\n full_name=row[4],\n )\n Session.add(user)\n Session.commit()", "def import_csv(directory_name, collection_file, database):\n 
LOGGER.debug('Importing %s CSV file...', collection_file)\n count = 0\n errors = 0\n try:\n filename = f'{collection_file}.csv'\n collection = database[collection_file]\n with open(os.path.join(directory_name, filename)) as file:\n collection.insert_many(data_convert(csv.DictReader(file)))\n count = collection.count_documents({})\n except OSError as err:\n print(f'OS error: {err}')\n LOGGER.error('Error reading %s file: %s', collection_file, err)\n errors = 1\n\n return count, errors", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def read_csv():", "def read(self):\n all_files = glob.glob(os.path.join(self.path, \"*.csv\"))\n start_time = datetime.now()\n for file in all_files:\n print(\"\\nImporting file: \" + file + \"\\n\")\n command = \"mongoimport -d ci_311db -c ci_311_incident --type csv --file \" + file + \" --headerline \" \\\n \"--columnsHaveTypes --numInsertionWorkers 4\"\n os.system(command)\n end_time = datetime.now()\n print(\"All CSVs imported in collection.\\nTotal import time: \" + str(end_time - start_time))", "def _csv_import(self, imppath):\n \n self.lookup_table = []\n\n with open(imppath, 'r') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n\n headerorder = []\n\n for i, row in enumerate(csvreader):\n if i == 0:\n headerorder = [s.lower() for s in row]\n\n if rgb.is_hex_color(row[headerorder.index('hexstr')]):\n self.lookup_table.append(DmcColor(hex=rgb.RgbColor(row[headerorder.index('hexstr')].strip()),\n id=row[headerorder.index('dmc')].strip(),\n name=row[headerorder.index('name')].strip()))", "def importData(path_in, random_state):\r\n # load data in\r\n try:\r\n URLdata = pd.read_csv(path_in, index_col=0)\r\n # shuffle the dataset with a random state specified\r\n URLdata = URLdata.sample(frac=1, random_state = random_state).reset_index(drop=True)\r\n\r\n logging.info('top 5 rows of data:\\n {}'.format(URLdata.head()))\r\n logging.info('shape of data: {}'.format(URLdata.shape))\r\n logging.info('There are {} rows in the data'.format(len(URLdata)))\r\n\r\n # find the dataset balance\r\n spam = len(URLdata[URLdata['label']==1])\r\n percent_spam = spam/len(URLdata)*100\r\n logging.info('There are {} in the data'.format(len(URLdata)))\r\n\r\n return URLdata\r\n except FileNotFoundError:\r\n logger.debug('File not found!')\r\n raise Exception('File not found!')", "def import_data_from_config(config):\n\n merge_columns = config[\"import_data\"][\"merge_columns\"]\n\n if not isinstance(merge_columns, list):\n msg = \"merge_columns (if used) must be a list\"\n raise ValueError(msg)\n\n data_out = config[\"import_data\"][\"output_data_directory\"]\n mkdir(data_out)\n\n # Require 'input_data_directories' to be a list\n data_in_list = 
config[\"import_data\"][\"input_data_directories\"]\n if not isinstance(data_in_list, list):\n msg = \"input_data_directories must be a list\"\n raise ValueError(msg)\n\n target_column = config[\"target_column\"]\n\n for d_in in data_in_list:\n import_directory_csv(d_in, data_out, target_column, merge_columns)", "def run_load(rootpath):\n global CSV_PATH\n CSV_PATH = rootpath+'/csv_files/'\n load_movies_details()\n load_movies_cast()\n load_movies_reviews()", "def enhance(parent_folder):\n parent_folder = Path(parent_folder).resolve()\n address_csv_files = sorted(parent_folder.glob('*.csv'))\n\n print(f'enhancing {len(address_csv_files)} csv files in {parent_folder}')\n\n data = Path(__file__).parent.parent.parent / 'data'\n workspace = (data / 'enhanced' / GDB_NAME).resolve()\n\n arcpy.env.workspace = str(workspace)\n\n for address_csv in address_csv_files:\n job = enhance_data(address_csv)\n\n prepare_output(job)\n convert_to_csv(job)\n remove_temp_tables(job)", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\lesson5\\\\data\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = ImportUnitTestData()\n result = mongo_insert.import_data(key, tmp_file)\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def emp_import():\n\twhile True:\n\t\ttry:\n\t\t\tfile_path = input(\"Enter the path of your file or enter 'quit' to go back to menu.\\n File Path: \")\n\t\texcept FileNotFoundError:\n\t\t\tprint(\"File Not Found Error.\")\n\t\t\tcontinue\n\t\tif file_path == \"quit\":\n\t\t\treturn\n\t\telif not os.path.exists(file_path) and not os.path.isfile(file_path):\n\t\t\tprint(\"Invalid Path.\")\n\t\t\tcontinue\n\t\telif file_path.lower().endswith(('.csv')) == False:\n\t\t\tprint(\"Please Choose a CSV File!\")\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint(\"File Found!\")\n\t\t\tbreak\n\tnew_lines = list()\n\tlines = list()\n\twith open(file_path, 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\tnew_lines.append(row)\n\twith open(\"employees.csv\", 'r') as readFile:\n\t\treader = csv.reader(readFile)\n\t\tnext(reader, None)\n\t\tfor row in reader:\n\t\t\tlines.append(row)\n\tnew_list = new_lines + lines\n\tto_add = set(tuple(row) for row in new_list)\n\twith open('employees.csv', 'w', newline='') as writeFile:\n\t\twriter = csv.writer(writeFile)\n\t\twriter.writerows(to_add)\n\t\tprint(\"Employees Added.\")\n\t\treturn", "def import_data(data_dir, *files):\n added = [0, 0, 0]\n errors = [0, 0, 0]\n fnl_lst = []\n for filepath in files:\n start_time = time.time()\n added = 0\n collection_name = filepath.split(\".\")[0]\n with open(os.path.join(data_dir, filepath)) as file:\n reader = csv.reader(file, delimiter=\",\")\n header = False\n\n for row in reader:\n try:\n if not header:\n header = [h.strip(\"\\ufeff\") for h in row]\n else:\n data = {header[i]:v for i, v in enumerate(row)}\n cursor = db[collection_name]\n cursor.insert_one(data)\n added +=1\n except Exception as e:\n print(e)\n fnl_lst.append((added,0,added,time.time()-start_time))\n return fnl_lst", "def import_experiments_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)", "def __init__(self):\n self.animals_list = []\n self.book = None\n try:\n with open(os.getcwd() 
+ '/animals.csv', 'r') as f:\n read_file = csv.reader(f)\n for row in read_file:\n self.animals_list += row\n except LookupError:\n print(\"Check if 'animals.csv' file is in the main directory.\")", "def load_data(path):\n train = pd.read_csv(os.path.join(path,'train.csv'))\n test = pd.read_csv(os.path.join(path,'test.csv'))\n \n return train, test", "def load(self, path):\n self.df = pd.read_csv(path)\n print(\"Loaded data from {}\".format(path))", "def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data", "def action_import(self):\n ctx = self._context\n \n data = base64.b64decode(self.data)\n file_input = cStringIO.StringIO(data)\n file_input.seek(0)\n reader_info = []\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n try:\n reader_info.extend(reader)\n except Exception:\n raise exceptions.Warning(_(\"Not a valid file!\"))\n keys = reader_info[0]", "def load(tables: Sequence[Model] = None, directory: str = None):\n tables = tables or models.TABLES\n directory = Path(directory or \"data/csv/\").absolute()\n if not directory.exists():\n raise ValueError(f\"{directory} is not a valid path.\")\n print(f\"Target directory: {directory}\")\n for i, table in enumerate(tables):\n print(f\"{i+1}. Processing {table.table_name()}...\")\n print(f\" Fields: {table.fields()}\")\n _load_table(table=table, directory=directory, format_=\"csv\")", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values", "def import_directory_csv(d_in, d_out, target_column, merge_columns):\n\n INPUT_FILES = grab_files(\"*.csv\", d_in)\n\n if not INPUT_FILES:\n logger.warning(\"No matching CSV files found, exiting\")\n exit(2)\n\n for f_csv in INPUT_FILES:\n f_csv_out = os.path.join(d_out, os.path.basename(f_csv))\n vals = (f_csv, f_csv_out, target_column, merge_columns)\n import_csv(vals)", "def import_test():\n if os.path.exists(\"test.csv\"):\n #print (\"--testing data imported to data frame\\n\")\n test_df = pd.read_csv(\"test.csv\", index_col=0)\n else:\n print(\"training CSV not found\")\n exit()\n \n return test_df", "def runs_loader(path):\n files = sorted(glob.glob(f\"{path}/*_runs.csv\"))\n df_lis = list(range(len(files)))\n for i, f in enumerate(files):\n try:\n df_lis[i] = pd.read_csv(f, sep=\",\", header=0)\n print('Read runs.csv\\n', f, df_lis[i].shape,\n df_lis[i]['dataset__id'][0], df_lis[i]['pipeline__id'][0])\n except Exception as e:\n print(e)\n continue\n df = pd.concat(df_lis, axis=0, sort=False).reset_index()\n # with pd.option_context('display.max_rows', None,\n # 'display.max_columns', None):\n # msg = tabulate.tabulate(df, headers='keys', tablefmt='psql')\n # print(msg)\n return df", "def loadFiles(root=\"data/TAIWAN_RAW_DATA/ADHD\"):\n\tdata_rt = [] # realtime.csv\n\tdata_trial = [] # trialdata.csv\n\tdata_id = [] # caseid/subjectid\n\tRealTime = \"A2RealTime_\"\n\tTrialData = \"A2TrialData_\"\n\tfolder_list = os.listdir(root) # list of subfolders in the root\n\tfor folders in folder_list:\n\t\tfolders_path = os.path.join(root,folders)\n\t\tif folders.find(\"pass\") != 
-1:\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tdata_rt.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t RealTime+folders[3:]+\".csv\")))\n\t\t\tdata_trial.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t\t TrialData+folders[3:]+\".csv\")))\n\t\t\tdata_id.append(int(folders.split('_')[1]))\n\t\texcept:\n\t\t\tprint(os.path.join(folders_path,TrialData+folders[3:]+\".csv\"))\n\t\t\t\n\treturn data_rt,data_trial,data_id,folder_list", "def loadData(catalog, accidentsfile):\n accidentsfile = cf.data_dir + accidentsfile\n input_file = csv.DictReader(open(accidentsfile, encoding=\"utf-8\"),\n delimiter=\",\") \n for accident in input_file:\n model.addAccident(catalog,accident)", "def parsing(self, data_path, header=0):\n df_content = []\n csvfiles = glob.glob(data_path)\n selected_cols = list(self.data_features)\n selected_cols.append('workload.type')\n selected_cols.append('workload.appname')\n\n for csv in csvfiles:\n data = pd.read_csv(csv, index_col=None, header=header, usecols=selected_cols)\n data[self.data_features] = self.abnormal_detection(data[self.data_features])\n df_content.append(data.dropna(axis=0))\n self.dataset = pd.concat(df_content, sort=False)", "def import_csv(self):\r\n path = tk.filedialog.askopenfile(initialdir=\"/\", title=\"Select File\",\r\n filetypes=((\"Comma-separated values (.csv)\", \"*.csv\"), (\"Text Document (.txt)\", \"*.txt\"),\r\n (\"All Files\", \"*.*\")))\r\n\r\n items = []\r\n if path is not None:\r\n for ticker in path:\r\n items.append(ticker)\r\n else:\r\n return\r\n\r\n tickers = items[0].split(',')\r\n for ticker in tickers:\r\n self.root.main.get_quote(ticker)", "def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = df.DataFrame()", "def ticketing_data_import(file_name, path=path):\n \n accepted_file_names = [\"Clx\", \"Pops\", \"Summer\", \"Chamber\", \"Connections\",\n \"Family\", \"Organ\", \"Specials\"]\n \n if file_name not in accepted_file_names:\n raise ValueError('file_name must be of accepted file types: ', \n accepted_file_names)\n \n files = [pd.read_csv(path + file_name + fy + \".csv\", skiprows=3) for fy in fiscal_years]\n tix_raw = pd.concat(files, ignore_index=True)\n \n return tix_raw", "def importing(files_list):\n\n dataframes = []\n\n for file in files_list:\n imported_df = pd.read_csv(f'full_data/{file}.csv')(file)\n imported_df.columns = imported_df.columns.str.strip().str.lower()\n dataframes.append(imported_df)\n\n return dataframes", "def main():\n # All the files in the uploads/siemens directory\n file_iterator = glob.iglob(os.path.join(VALUE_REPORT_DIRECTORY, \"*.csv\"))\n # Reseeding the database, so we need to look at all values including already imported ones\n include_archive = len(sys.argv) > 1 and sys.argv[1] == 'all'\n if include_archive:\n # Also iterate over all the archived CSVs\n 
file_iterator = itertools.chain(file_iterator,\n glob.iglob(os.path.join(ARCHIVE_DIRECTORY, \"*.csv\")))\n\n for filename in file_iterator:\n print(filename, file=sys.stderr)\n with open(filename, 'r') as csv_file:\n try:\n reader = csv.reader(csv_file)\n next(reader) # headers\n\n # Gets the given number associated with a point that the values are indexed on\n point_names = save_point_name_index(reader)\n\n next(reader) # Date Range\n next(reader) # Report Timings\n next(reader) # empty\n next(reader) # headers\n\n array_for_json = arrange_value_tuples(reader, point_names)\n\n # API call returns a boolean signifying if the import was successful\n success = post_values(array_for_json)\n # If is was successful and we are not reseeding, we want to move the files to the\n # archives folder so we aren't trying to reimport them every day.\n if success[0] and not include_archive:\n os.system('mv %s %s' % (filename, ARCHIVE_DIRECTORY))\n if success[1] and not include_archive:\n os.system('mv %s %s' % (filename, ARCHIVE_WEIRD_DIRECTORY))\n except Exception as e:\n print()\n print(\"Exception while reading file:\", filename)\n print(e)", "def read_csv_file(self):\n pass", "def read_csv_file(dir_name, csv_file, collection, error_list):\n count = 0\n try:\n filename = os.path.join(dir_name, csv_file)\n with open(filename, 'r') as file:\n csv_reader = csv.DictReader(file)\n # create the document for products collection\n for row in csv_reader:\n collection.insert_one(row)\n except FileNotFoundError:\n LOGGER.info('FileNotFoundError')\n count += 1\n except Exception as error:\n count += 1\n LOGGER.info('Exception:')\n LOGGER.info(error)\n error_list.append(count)", "def load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', path))", "def import_data(ADCthres=0, switch=False):\n dirname = os.path.dirname(__file__)\n folder = os.path.join(dirname, '../Data/')\n files_in_folder = os.listdir(folder)\n df_tot = pd.DataFrame()\n \n for file in files_in_folder:\n if file[-4:] == '.csv':\n file_path = folder + file\n df = pd.read_csv(file_path, header=None, sep=', ', \n names=['Trigger', 'HighTime', 'Time', 'Bus', \n 'Channel', 'ADC'], engine='python')\n df = df.drop(df.index[0])\n df_tot = pd.concat([df_tot, df])\n \n df_tot['Trigger'] = df_tot['Trigger'].astype(int)\n df_tot['HighTime'] = df['HighTime'].astype(int)\n df_tot['Time'] = df_tot['Time'].astype(int)\n df_tot['Bus'] = df_tot['Bus'].astype(int)\n df_tot['Channel'] = df_tot['Channel'].astype(int)\n df_tot['ADC'] = df_tot['ADC'].astype(int)\n \n df_tot = df_tot[df_tot.ADC > ADCthres]\n \n if switch:\n df_tot = switch_wCh_pairwise(df_tot)\n \n df_tot.reset_index(drop=True, inplace=True)\n return df_tot", "def readFile(filename):\n\twith open(filename, 'rU') as csvIN:\n\t\tnext(csvIN)\n\t\toutCSV=(line for line in csv.reader(csvIN, dialect='excel'))\n\t\tfor row in outCSV:\n e = Entry(row)\n e.pass_import()", "def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' 
% self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)", "def import_data(catalog='xmatch_TGAS_Simbad.csv', params=None, nrows=None, delimiter=','):\n print \"Loading %s and creating DataFrame..\" % catalog\n df_imported = pd.read_csv(catalog, delimiter=delimiter, header=0, usecols=params, nrows=nrows)\n print \"..Done\\n----------\"\n return df_imported", "def import_from_csv(self) -> None:\n logging.info('import_from_csv')\n if self.target_table and str(self.target_table).lower() in [\"issue\", \"version\"]:\n if self.file_path and exists(self.file_path):\n # Read CSV file\n csv_data = pd.read_csv(self.file_path).to_dict('records')\n\n # Import Version\n if str(self.target_table).capitalize() == \"Version\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Version).delete()\n click.echo('Overwrite Version table')\n\n for version in csv_data:\n if all(item in list(version.keys()) for item in ['tag', 'start_date', 'end_date']):\n newVersion=Version(\n project_id=version['project_id'],\n name=version[\"name\"], \n tag=version[\"tag\"], \n start_date=datetime.strptime(version[\"start_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n end_date=datetime.strptime(version[\"end_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n )\n \n try:\n self.session.add(newVersion)\n compute_version_metrics(self.session, self.configuration.current_branch, newVersion.project_id)\n click.echo('Importing ' + str(len(csv_data)) + ' version(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields')\n\n # Import Issue\n if str(self.target_table).capitalize() == \"Issue\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Issue).delete()\n click.echo('Overwrite Issue table')\n\n for issue in csv_data:\n if all(item in list(issue.keys()) for item in ['number', 'created_at', 'updated_at']):\n newIssue=Issue(\n project_id=issue['project_id'],\n number=issue[\"number\"],\n title=issue[\"title\"],\n created_at=datetime.strptime(issue[\"created_at\"], '%Y-%m-%d %H:%M:%S.%f'),\n updated_at=datetime.strptime(issue[\"updated_at\"], '%Y-%m-%d %H:%M:%S.%f'))\n\n try:\n self.session.add(newIssue)\n click.echo('Importing ' + str(len(csv_data)) + ' issue(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields') \n\n self.session.commit()\n else:\n logging.error('File not found')\n sys.exit('File not found')\n else:\n logging.error('Target table not found')\n sys.exit('Target table not found')", "def load_data(data_dir, atlas, scale):\n\n data_dir = Path(data_dir)\n data = pd.read_csv(data_dir / 
atlas / f'{scale}.csv', index_col=0)\n\n # drop medial stuff\n todrop = np.array(putils.DROP)[np.isin(putils.DROP, data.index)]\n if len(todrop) > 0:\n data = data.drop(todrop, axis=0)\n\n # get indices of diff hemispheres\n idx_lh = [n for n, f in enumerate(data.index) if 'lh_' in f]\n idx_rh = [n for n, f in enumerate(data.index) if 'rh_' in f]\n\n # get data array\n labels = np.asarray(data.columns)\n data = np.asarray(data).squeeze()\n\n return data[idx_lh], data[idx_rh], labels", "def parse_csv(csv_path):\n song_list = []\n\n try:\n with open(csv_path, encoding='utf-8') as playlist:\n print(\"Parsing \" + csv_path)\n reader = csv.reader(playlist, delimiter=',')\n next(reader) # skip csv header\n for row in reader:\n song_list.append(row[2] + \" - \" + row[1])\n # todo: parse CSV, then check to see which songs already exist in current dir\n # move non-existent results to new list and return that\n except IndexError as error:\n # consider validating playlists when parsing\n # from API on web server instead\n print(str(error))\n \n return song_list", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', 
'').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def make_up(self, base_path='./data/'):\n for csv_file_path in [f\"{base_path}{_}\" for _ in os.listdir(base_path)]:\n self.append_file(csv_file_path)", "def import_sitefinder_data(path):\n asset_data = []\n\n site_id = 0\n\n with open(os.path.join(path), 'r') as system_file:\n reader = csv.DictReader(system_file)\n next(reader, None)\n for line in reader:\n if line['Operator'] != 'Airwave' and line['Operator'] != 'Network Rail':\n # if line['Operator'] == 'O2' or line['Operator'] == 'Vodafone':\n # if line['Anttype'] == 'MACRO' or \\\n # line['Anttype'] == 'SECTOR' or \\\n # line['Anttype'] == 'Sectored' or \\\n # line['Anttype'] == 'Directional':\n asset_data.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [float(line['X']), float(line['Y'])]\n },\n 'properties':{\n 'name': 'site_' + str(site_id),\n 'Operator': line['Operator'],\n 'Opref': line['Opref'],\n 'Sitengr': line['Sitengr'],\n 'Antennaht': line['Antennaht'],\n 'Transtype': line['Transtype'],\n 'Freqband': line['Freqband'],\n 'Anttype': line['Anttype'],\n 'Powerdbw': line['Powerdbw'],\n 'Maxpwrdbw': line['Maxpwrdbw'],\n 'Maxpwrdbm': line['Maxpwrdbm'],\n 'Sitelat': float(line['Sitelat']),\n 'Sitelng': float(line['Sitelng']),\n }\n })\n\n site_id += 1\n\n else:\n pass\n\n return asset_data", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = mdb.ImportData(key, tmp_file)\n result = mongo_insert.import_data()\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def readInCSV(csvFile):\n\tprint \"Checking if helper app is installed...\"\n\tandroidCheckAndInstallHelper()\n\ttry:\n\t\tprint \"Will read in the files from %s\" % csvFile\n\t\tstatus = subprocess.call([\"adb\",\"shell\",\"am\",\"startservice\",\n\t\t\t\t\t\t\t\t \"-a\", \"com.synchronoss.androidDev.contactcreaterapp.action.IMPORT\",\n\t\t\t\t\t\t\t\t \"-e\", \"CSV\", csvFile,\n\t\t\t\t\t\t\t\t \"com.synchronoss.androidDev.contactcreaterapp/.CreateAndAddContacts\"],\n\t\t\t\t\t\t\t\t stdout=stdout,stderr=stderr)\n\t\tif (status == 1):\n\t\t\tprint \"Contacts successfully copied from csv on target device.\"\n\t\tif (status != 0):\n\t\t\tprint >>sys.stderr, \"Unable to launch contact adder app\"\n\t\t\tsys.exit()\n\texcept OSError as e:\n\t\tprint >>sys.stderr, \"Execution failed: \", e\n\t\tsys.exit()\n\twaitForHelperApp()", "def test_import_data():\n\n file_path = os.path.join(CONST_ADVANTICSYS_DIR, CONST_ADVANTICSYS_TEST_1)\n\n # Bring df\n success, log, test_ingress_df = advanticsys_import(file_path)\n assert success, log\n assert isinstance(test_ingress_df, pd.DataFrame)\n\n # Test import function\n success, log = import_data(\n test_ingress_df,\n CONST_ADVANTICSYS,\n SQL_USER,\n SQL_PASSWORD,\n SQL_HOST,\n SQL_PORT,\n SQL_TEST_DBNAME\n )\n\n assert success is True, log \n assert log == \"New: 0 (uploaded); 
Duplicates: 75 (ignored)\"", "def get_raw_data():\n data_files = []\n for i, f in enumerate(os.listdir(config.RAW_DATA_DIR)):\n data_files.append(f)\n print i, \": \", f\n while True:\n try:\n index = int(raw_input(\"Type the index of the data file you'd like to import: \"))\n fn_raw_data = data_files[int(index)]\n break\n except ValueError:\n print(\"Not a valid index. Try again.\")\n except IndexError:\n print(\"Not a valid index. Try again.\")\n print \"Importing %s...\" % fn_raw_data\n with open(config.RAW_DATA_DIR + fn_raw_data) as infile:\n next(infile)\n raw_data = list(csv.DictReader(infile))\n return (fn_raw_data, raw_data)", "def import_data(full=False):\n if(full):\n tweet_pos = pd.read_csv('data/train_pos_full.txt', header = None, sep = \"\\r\\n\", engine = 'python')\n tweet_neg = pd.read_csv('data/train_neg_full.txt', header = None, sep = \"\\r\\n\", engine = 'python')\n else:\n tweet_pos = pd.read_csv('data/train_pos.txt', header = None, sep = \"\\r\\n\", engine = 'python')\n tweet_neg = pd.read_csv('data/train_neg.txt', header = None, sep = \"\\r\\n\", engine = 'python')\n tweet_test = pd.read_csv('data/test_data.txt', header = None, sep = \"\\r\\n\", engine = 'python')\n return tweet_pos, tweet_neg, tweet_test", "def load_exam(options):\n try:\n csvFile = open(options.get('o'), 'rb')\n except IOError as (errno,strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n \n csvReader = reader(csvFile, delimiter=\":\")\n \n for data in csvReader:\n new_exam = Exam.objects.create()\n new_exam.exam_code = data[0]\n new_exam.exam_name = data[1]\n new_exam.save()\n print \"Added ({0} : {1})\".format(data[0], data[1])", "def airline(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'airline.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/Ecdat/Airline.csv'\n maybe_download_and_extract(path, url,\n save_file_name='airline.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata", "def expand_source_data():\n\n file = csv_file('exercise.csv')\n add_to_csv_file = generate_csv.BuildCsvFile(100000, file)\n add_to_csv_file.add_rows()", "def load_dataset(path):\n training_data = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def main():\n filename = \"data/exercise.csv\"\n analyze(filename)", "def import_datafile(db, infile):\n res = stat(infile)\n mtime = datetime.utcfromtimestamp(res.st_mtime)\n\n hash = md5hash(infile)\n\n data_file = db.model.data_file\n\n # Should maybe make sure error is not set\n rec = db.get(data_file, hash)\n # We are done if we've already imported\n if rec is not None:\n return False\n\n # Values to insert\n cols = dict(\n file_hash=hash,\n file_mtime=mtime,\n basename=infile.stem,\n csv_data=None)\n\n try:\n cols['csv_data'] = extract_datatable(infile)\n except NotImplementedError as e:\n secho(str(e), fg='red', dim=True)\n\n tbl = data_file.__table__\n sql = (insert(tbl)\n .values(file_path=str(infile), **cols)\n .on_conflict_do_update(\n index_elements=[tbl.c.file_path],\n set_=dict(**cols)))\n db.session.execute(sql)\n return True", "def read_data(name: str) -> pd.DataFrame:\n import_dir = Path.cwd().joinpath('eikon_data_files')\n\n path = Path.joinpath(import_dir, Path(name))\n if path.exists():\n return 
pd.read_csv(path, sep=',')\n else:\n print('File type \"' + name + '.csv' + ' does not exist. Aborted.')\n quit()", "def load_csv(*, path, filename, sep=\"\\t\", verbose=True):\n \n os.chdir(path)\n if len(glob.glob(filename))==1: \n df = pd.read_csv(filename, sep=sep, low_memory=False)\n \n # display example,\n if verbose==True:\n display(df.head(3))\n print(df.shape)\n else:\n pass\n \n # return,\n return df\n \n else:\n if verbose==True:\n print(f\"\"\"ERROR :csv file {filename}, was not found in: \\n {path}\"\"\")\n else:\n pass", "def loadFiles(analyzer,totalFiles):\n for filename in totalFiles:\n if filename.endswith('.csv'):\n print('Cargando archivo: ' + filename)\n loadTrips(analyzer, filename)\n print(\"Cargando información extra...\")\n model.findPopulars(analyzer)\n model.findPopularsAdd(analyzer)\n return analyzer", "def load_all(self, dir_path):\n # each file name corresponds to another date\n input_paths = [os.path.join(dir_path, f) for f in os.listdir(dir_path)\n if os.path.isfile(os.path.join(dir_path, f)) and f.endswith('.csv')]\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(HDD_data.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(HDD_data.load_single(path) for path in input_paths)\n\n return all_df", "def main():\n parser = ArgumentParser(usage='%(prog)s [options] ecommonsMetadata.csv')\n parser.add_argument(\"-d\", \"--date\", dest=\"date\",\n help=\"Date on or after that an ETD was published for \\\n creating DOIs. Put in format YYYY-MM\")\n parser.add_argument(\"datafile\", help=\"eCommons metadata worked from.\")\n\n args = parser.parse_args()\n\n if not len(sys.argv) > 0:\n parser.print_help()\n parser.exit()\n\n workingdir = csvparse(args.datafile, args.date)\n doiparse(workingdir)\n print('ANVL files available in: ' + workingdir)", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def load_csv(filename):\n # Open csvfile\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n\n # Put data in gloabal list\n for row in reader:\n # Get data of subject with either or both milk and peanut allergy\n if row[\"MILK_ALG_START\"] != \"NA\" or row[\"PEANUT_ALG_START\"] != \"NA\":\n sub_list = list()\n for key in DATA_KEYS:\n sub_list.append(row[key])\n\n # Add data of subject to all data \n data_list.append(sub_list)", "def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file", "def import_rows(self, csv_file, table_id=None):\n if table_id:\n self.table_id = table_id\n\n params = {'startLine': 1, # skip cols?\n 'encoding': \"UTF-8\",\n 'delimiter': \",\",\n 'isStrict': True}\n\n media = MediaFileUpload(csv_file, mimetype='text/csv', resumable=True)\n self.request = self._table().importRows(tableId=self.table_id, media_body=media, **params)\n self._process_request(name='import_rows', resumable=True)\n \n # URL for new look \n logger.info(\"The fusion table is located at: {}\".format(\n self.build_uri('/view')))\n return True", "def load(input_dir: str) -> None:\n raw_contracts = 
reader.read_dir(input_dir)\n contracts = tribble.transform.transform(raw_contracts)\n\n LOGGER.info(f'Loading data from {input_dir} in database.')\n loader.load_dataframe(raw_contracts, contract.RawContract)\n loader.load_dataframe(contracts, contract.Contract)\n LOGGER.info('Finished loading data.')", "def _load_training_data(base_dir):\n train_data = pd.read_csv(os.path.join(base_dir, \"train_vale.csv\")).adjclose.values\n return _data_transformation(train_data)", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()" ]
[ "0.64119494", "0.62574255", "0.6136558", "0.60596466", "0.6012882", "0.5962187", "0.5958087", "0.5912588", "0.58974314", "0.5878545", "0.58628714", "0.5797354", "0.57774633", "0.577703", "0.5762553", "0.57592994", "0.57388955", "0.57192475", "0.57123953", "0.5707139", "0.56904113", "0.5681002", "0.56616414", "0.56305915", "0.561907", "0.5616299", "0.5616297", "0.56127244", "0.5609329", "0.56071156", "0.56031096", "0.5601768", "0.5593907", "0.5586855", "0.55780107", "0.55757797", "0.5572211", "0.5571038", "0.5557825", "0.55502635", "0.55467093", "0.5546389", "0.5544756", "0.55323523", "0.55143464", "0.5488751", "0.54863614", "0.54803544", "0.54785377", "0.54772073", "0.5476512", "0.5472432", "0.54706836", "0.54661083", "0.5456441", "0.5445595", "0.54447085", "0.54433495", "0.5439545", "0.54341745", "0.54294395", "0.54289067", "0.54188186", "0.54141515", "0.5409612", "0.54083484", "0.540155", "0.54000926", "0.5398762", "0.5397234", "0.5389584", "0.5389521", "0.53894943", "0.5384312", "0.53822225", "0.5371215", "0.5369982", "0.5361603", "0.53598344", "0.5347874", "0.5344276", "0.5337604", "0.53372985", "0.5337014", "0.53349566", "0.5333768", "0.5332385", "0.5325229", "0.53230834", "0.53169227", "0.53103894", "0.5304067", "0.52979654", "0.52979654", "0.5291912", "0.5291405", "0.5288953", "0.52887845", "0.52842844", "0.5283546" ]
0.7007926
0
Import ASHRAE data with optional caching mechanism.
def get_raw_data(ashrae_dir, cache_file=None, filenames=const.NAMES): cache_file = pathlib.Path(cache_file) if cache_file is not None and cache_file.exists(): data = import_dict_from_cached(cache_file, filenames) else: data = import_data(ashrae_dir) _cache_data(data, cache_file) # Sanity check: the set of building ids should be the same in the train and test sets. assert set(data['train'].building_id) == set(data['test'].building_id) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_(self, data):\n return self.__import(data)", "def load_data(self) -> None:", "def load_data(data_set_key: str, a2e_data_path: str = '../../../a2e-data/data', cache_dir: str = None) -> BearingDataSet:\n\n if a2e_data_path is not None and not a2e_data_path.startswith('http') and not a2e_data_path.startswith('file://'):\n if os.path.isabs(a2e_data_path):\n a2e_data_path = 'file://' + os.path.abspath(a2e_data_path)\n else:\n bearing_module_path = pathlib.Path(__file__).parent.absolute()\n absolute_data_path = os.path.abspath(os.path.join(bearing_module_path, a2e_data_path))\n if os.name == 'nt':\n absolute_data_path = f'/{absolute_data_path}'.replace('\\\\', '/')\n\n a2e_data_path = 'file://' + absolute_data_path\n\n if not os.path.isdir(a2e_data_path.replace('file://', '')):\n a2e_data_path = 'https://github.com/maechler/a2e-data/raw/master/data/'\n\n if cache_dir is None:\n cache_dir = os.path.join(Path.home(), '.a2e')\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n a2e_data_path = a2e_data_path.rstrip('/') + '/'\n data_set_description_origin = f'{a2e_data_path}{data_set_key}.yaml'\n data_set_origin = f'{a2e_data_path}{data_set_key}.csv.gz'\n data_set_description_path = get_file(data_set_key + '.yaml', origin=data_set_description_origin, cache_dir=cache_dir, cache_subdir='datasets/bearing')\n windows = {}\n\n with open(data_set_description_path) as data_set_description_file:\n data_set_description = yaml.load(data_set_description_file, Loader=yaml.FullLoader)\n data_set_path = get_file(data_set_key + '.csv.gz', origin=data_set_origin, cache_dir=cache_dir, cache_subdir='datasets/bearing', file_hash=data_set_description['data']['md5_hash'], hash_algorithm='md5')\n\n with gzip.open(data_set_path, mode='rt') as data_set_file:\n data_frame = pd.read_csv(data_set_file, parse_dates=[data_set_description['data']['index_column']], date_parser=lambda x: timestamp_to_date_time(float(x)), quotechar='\"', sep=',')\n data_frame = data_frame.set_index(data_set_description['data']['index_column'])\n\n for window_key, window_description in data_set_description['windows'].items():\n windows[window_key] = {\n 'mask': (data_frame.index > window_description['start']) & (data_frame.index <= window_description['end']),\n 'label': window_description['label'],\n }\n\n return BearingDataSet(data_set_key, data_frame, windows)", "def _loadData(self):\n self.d = read_ac_data.read_ac_data_wrapper(self.sc_id, self.date,\n dType='10Hz')\n return", "def data_airline():\n return load_airline()", "def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def import_data(ashrae_dir, filenames=const.NAMES):\n print('Importing data from csv')\n ashrae_dir = pathlib.Path(ashrae_dir)\n data = {name: pd.read_csv((ashrae_dir / name).with_suffix('.csv')) for name in filenames}\n\n return data", "def load_data(self):", "def _import(self, data):\n if isinstance(data, dict):\n if len(data):\n for key in data:\n if data.get(key) is not None:\n if not self.set(key, data.get(key)):\n raise Exception('%s %s icin dogru bir veri degil.' 
% (data.get(key), key))", "def _load_data(self):\n\n if not self._cache.exists(config.DATAFRAME_SONG_DATA):\n source_path = os.path.join(config.S3_SONG_DATA, 'A/A/A/*.json') # Note: song database is way big, so we get only a slice of it.\n dataframe = self._get_spark_session().read.json(source_path)\n self._cache.set_source(config.DATAFRAME_SONG_DATA, dataframe)", "def import_data(self, data):\n # Import additional data for tuning\n # data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'\n pass", "def test_import_data():\n\n file_path = os.path.join(CONST_ADVANTICSYS_DIR, CONST_ADVANTICSYS_TEST_1)\n\n # Bring df\n success, log, test_ingress_df = advanticsys_import(file_path)\n assert success, log\n assert isinstance(test_ingress_df, pd.DataFrame)\n\n # Test import function\n success, log = import_data(\n test_ingress_df,\n CONST_ADVANTICSYS,\n SQL_USER,\n SQL_PASSWORD,\n SQL_HOST,\n SQL_PORT,\n SQL_TEST_DBNAME\n )\n\n assert success is True, log \n assert log == \"New: 0 (uploaded); Duplicates: 75 (ignored)\"", "def load_apc(self, apc_path):\n self.apc = pd.read_pickle(os.path.join(self.data_path, 'apc.pick'))", "def action_import(self):\n ctx = self._context\n \n data = base64.b64decode(self.data)\n file_input = cStringIO.StringIO(data)\n file_input.seek(0)\n reader_info = []\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n try:\n reader_info.extend(reader)\n except Exception:\n raise exceptions.Warning(_(\"Not a valid file!\"))\n keys = reader_info[0]", "def importData( self, asset = '', searchAndReplace = ['',''] ):\n\t\tpickleData = pickle.load( open( self.dataPath.path, \"rb\" ) )\n\t\tlayers = [RenderLayerData(l,d) for l,d in pickleData.items() if not ':' in l]\n\t\tfor l in layers:\n\t\t\tif not searchAndReplace [0]== '' or not searchAndReplace[1] == '':\n\t\t\t\tl.filterMe( asset, searchAndReplace )\n\t\t\tl.create()\n\t\t\tl.addObjects()\n\t\t\tl.makeOverrides()\n\t\t\tl.makeOverrideConnections()\n\t\t\tl.makeShaderOverride()", "def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item", "def import_sitefinder_data(path):\n asset_data = []\n\n site_id = 0\n\n with open(os.path.join(path), 'r') as system_file:\n reader = csv.DictReader(system_file)\n next(reader, None)\n for line in reader:\n if line['Operator'] != 'Airwave' and line['Operator'] != 'Network Rail':\n # if line['Operator'] == 'O2' or line['Operator'] == 'Vodafone':\n # if line['Anttype'] == 'MACRO' or \\\n # line['Anttype'] == 'SECTOR' or \\\n # line['Anttype'] == 'Sectored' or \\\n # line['Anttype'] == 'Directional':\n asset_data.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [float(line['X']), float(line['Y'])]\n },\n 'properties':{\n 'name': 'site_' + str(site_id),\n 'Operator': line['Operator'],\n 'Opref': line['Opref'],\n 'Sitengr': line['Sitengr'],\n 'Antennaht': line['Antennaht'],\n 'Transtype': line['Transtype'],\n 'Freqband': line['Freqband'],\n 'Anttype': line['Anttype'],\n 'Powerdbw': line['Powerdbw'],\n 'Maxpwrdbw': line['Maxpwrdbw'],\n 'Maxpwrdbm': line['Maxpwrdbm'],\n 'Sitelat': float(line['Sitelat']),\n 
'Sitelng': float(line['Sitelng']),\n }\n })\n\n site_id += 1\n\n else:\n pass\n\n return asset_data", "def _load_cache(self):\n logger.debug(\"Loading coherence data for %s from cache\", self.w1)\n\n assert self.variant_unit is None, \"Cannot load from cache once variant_unit has been set\"\n with open(self._cache_key) as f:\n self.rows = json.load(f)\n\n self._already_generated = True\n logger.debug(\"Loaded {} rows from cache ({})\".format(len(self.rows), self._cache_key))", "def importer():\n pass", "def load_data(\n cache_dir: Optional[str] = None,\n) -> tuple[client_data.ClientData, client_data.ClientData]:\n database_path = download.get_compressed_file(\n origin='https://storage.googleapis.com/tff-datasets-public/shakespeare.sqlite.lzma',\n cache_dir=cache_dir,\n )\n train_client_data = sql_client_data.SqlClientData(\n database_path, split_name='train'\n ).preprocess(_add_parsing)\n test_client_data = sql_client_data.SqlClientData(\n database_path, split_name='test'\n ).preprocess(_add_parsing)\n return train_client_data, test_client_data", "def importAbc(self, parent_transform=True):\n self.logger.info(\"Import Alembic\")\n\n if self.data['cacheFileNameAttr'] != '':\n if os.path.isfile(self.data['cacheFileNameAttr']):\n\n # now try the abcImport\n try:\n if parent_transform:\n cmds.AbcImport(self.data['cacheFileNameAttr'], reparent=self.data['transformNode'])\n self.logger.debug(\"Parenting to %s \" % self.data['transformNode'])\n else:\n cmds.AbcImport(self.data['cacheFileNameAttr']) \n\n self.logger.info(\"Imported : %s\" % self.data['cacheFileNameAttr'])\n return True\n\n except Exception, e:\n self.logger.error(\"Import Alembic Error : %s\" % e)\n return False\n else:\n self.logger.error(\"Missing file : %s\" % self.data['cacheFileNameAttr'])\n return False\n else:\n self.logger.info(\"Empty attribute : %s.cacheFileNames\" % self.data['shapeNode'])\n return False", "def on_import(self, event=None):\n if event is not None:\n event.Skip()\n data_id, theory_id, state_id = self.set_data_helper()\n temp = data_id + state_id\n self.parent.set_data(data_id=temp, theory_id=theory_id)", "def import_data(self, keyname, data):\n return self.database.jsonset(keyname, Path.rootPath(), data)", "def __init__(self, filename = None, dbalias = None, hltpskey = None ):\n super(HLTPrescalesSetAccess,self).__init__( ConfigType.HLTPS, mainkey = \"prescales\",\n filename = filename, dbalias = dbalias, dbkey = hltpskey )\n self.loader.setQuery([\n \"SELECT HPS_DATA FROM {schema}.HLT_PRESCALE_SET WHERE HPS_ID={dbkey}\" # for current and new db schema\n ])\n self.load()", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def load_data(self):\n raise NotImplementedError()", "def importAovs(self):\n\t\tLayersInfo = pickle.load( open( self.aovsPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tfor ao in LayersInfo.keys():\n\t\t\taov.create( ao, LayersInfo[ao]['name'], LayersInfo[ao]['type'], LayersInfo[ao]['enabled'] )\n\t\tmc.refresh( su = 0 )", "def load_data(self, read_shelf):\n if read_shelf:\n try:\n # Attempt reading pre-shelved 
objects first\n self.__read_shelf()\n except Exception as e:\n print(f'Exception while reading the data shelf ({e})')\n # Otherwise, read data from the the json files\n self.__read_json()\n else:\n self.__read_json()", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def load_ae(self, year):\n ae_paths = list(pathlib.Path(config.AE_DIR).glob(f'{year}*ae.txt'))\n assert len(ae_paths) == 1, (f'No AE files found.\\nae_dir={config.AE_DIR}, '\n f'year={year}, ae_paths={ae_paths}')\n ae_data = pd.read_csv(ae_paths[0], sep=' ', index_col=0, \n parse_dates=True, comment='#', \n names=['dateTime', 'AE'])\n return ae_data", "def load_from_cache(self):\n try:\n with open(self.cache_filename, 'r') as cache:\n json_data = cache.read()\n data = json.loads(json_data)\n except IOError:\n data = {'data': {}, 'inventory': {}}\n\n self.data = data['data']\n self.inventory = data['inventory']", "def import_db(import_file):\n import_data(import_file)", "def __init__(self, load_data=True):\n if not load_data:\n return\n\n # Load data files\n data_paths = [\"app_data\", \"..app_data\"]\n adp = None\n\n for data_path in data_paths:\n if os.path.exists(os.path.join(os.curdir, data_path)):\n adp = os.path.join(os.curdir, data_path)\n\n if not adp:\n _logger.error(\"app data path not found.\")\n return\n\n # load all files in the rest_api/app_data path.\n files = glob(os.path.join(adp, \"*\"))\n for filename in files:\n\n if filename.endswith(\".txt\"):\n key = os.path.basename(filename).replace(\".txt\", \"\")\n # decode and encode are required for the cryillic names in the files.\n self._app_data[key] = [\n line.strip().decode(\"utf-8\") for line in open(filename, \"rb\").readlines()\n ]\n\n if filename.endswith(\".csv\"):\n key = os.path.basename(filename).replace(\".csv\", \"\")\n self._app_data[key] = list(csv.DictReader(open(filename)))\n\n if filename.endswith(\".json\"):\n key = os.path.basename(filename).replace(\".json\", \"\")\n self._app_data[key] = json.loads(open(filename).read())", "def _import_data(self, data, base_url, endpoint, timezone_offset=None, ignore_alias=False, dataset_id=None,\n dataset_version=None, raw_record_import=False):\n assert self.token, \"Project token required for import!\"\n if self.dataset_id or dataset_version:\n if not (dataset_id and dataset_version):\n Mixpanel.LOGGER.warning('Both dataset_id AND dataset_version are required')\n return\n\n # Create a list of arguments to be used in one of the _prep functions later\n args = [{}, self.token]\n\n item_list = Mixpanel._list_from_argument(data)\n if not raw_record_import:\n if endpoint == 'import' or endpoint == 'import-events':\n args.append(timezone_offset)\n elif endpoint == 'engage' or endpoint == 'import-people':\n args.extend(['$set', lambda profile: profile['$properties'], ignore_alias, True])\n else:\n args = None\n\n self._dispatch_batches(base_url, endpoint, item_list, args, dataset_id=dataset_id,\n dataset_version=dataset_version)", "def load_data_reader(data_reader=\"SpreadsheetDataReader\"):\n return importlib.import_module('c302.%s'%data_reader)", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def import_(cls, data_id, type_,*args, **kwargs):\n data_path = cls.my_data_path(data_id)\n config = cls.generate_config(data_path,*args,**kwargs)\n task_id = Task.register(Task.TYPE.DATA_IMPORT)\n DataImport(data_id= data_id, 
task_id= task_id, type_l= type_, config= config).save()\n Task.fire(task_id)", "def load_data(path):\n\n columns = ['Item Year', 'Original Value', 'Standard Value', 'Original Currency',\n 'Standard Currency', 'Orignal Measure', 'Standard Measure', 'Location',\n 'Commodity']\n col_type = [int, float, float, object, object, object, object, object]\n\n col_type_dict = dict(zip(columns, col_type))\n\n au_df = pd.read_csv(path, usecols=columns)\n au_df = au_df.astype(col_type_dict)\n au_df.name = 'AU_data'\n \n return au_df, columns", "def import_from(self, importer=None):\n if not importer:\n raise aspecd.exceptions.MissingImporterError(\"No importer provided\")\n importer.import_into(self)\n self._origdata = copy.deepcopy(self.data)", "def loadCacheFile(self):\n if not os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)", "def advanticsys_import(blobin: func.InputStream):\r\n\r\n logging.info(\r\n f\"Starting advanticsys sensor data import process:\\n\"\r\n f\"Name: {blobin.name}\\n\"\r\n f\"Blob Size: {blobin.length} bytes\"\r\n )\r\n\r\n # reading in data as pandas dataframe\r\n data_str = str(blobin.read(), \"utf-8\")\r\n data_stream = StringIO(data_str)\r\n data_df = pd.read_csv(data_stream)\r\n\r\n # getting the environmental parameters\r\n user = \"{}\".format(os.environ[\"CROP_SQL_USER\"].strip())\r\n password = \"{}\".format(os.environ[\"CROP_SQL_PASS\"].strip())\r\n host = \"{}\".format(os.environ[\"CROP_SQL_HOST\"].strip())\r\n port = \"{}\".format(os.environ[\"CROP_SQL_PORT\"].strip())\r\n database = \"{}\".format(os.environ[\"CROP_SQL_DBNAME\"].strip())\r\n\r\n # uploading data to the database\r\n status, log = import_data(\r\n data_df, CONST_ADVANTICSYS, user, password, host, port, database\r\n )\r\n\r\n # Logging the advanticsys sensor data upload event\r\n conn_string = make_conn_string(SQL_ENGINE, user, password, host, port)\r\n\r\n log_status, log_err = log_upload_event(CONST_ADVANTICSYS, blobin.name, status, log, conn_string)\r\n\r\n if status:\r\n\r\n logging.info(\r\n f\"SUCCESS: advanticsys sensor data import process finished:\\n\"\r\n f\"Name: {blobin.name}\\n\"\r\n f\"Blob Size: {blobin.length} bytes\\n\"\r\n f\"Info: {log}\\n\"\r\n f\"Log: {log_status} {log_err}\"\r\n )\r\n\r\n else:\r\n\r\n logging.info(\r\n f\"ERROR: advanticsys sensor data import process failed:\\n\"\r\n f\"Name: {blobin.name}\\n\"\r\n f\"Blob Size: {blobin.length} bytes\\n\"\r\n f\"Info: {log}\\n\"\r\n f\"Log: {log_status} {log_err}\"\r\n )", "def load_tas_lookup():\n logger.info('Loading TAS')\n load_tas()", "async def async_step_import(self, data):\n self.network_key = data.get(CONF_NETWORK_KEY)\n self.usb_path = data.get(CONF_USB_PATH)\n return await self.async_step_user()", "def import_realia(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('git pull')\n run('cd import_scripts;../bin/python import_realia.py load_fixture')\n run('bin/django update_index dasa.Realia')", "def load(self, filename=None):\n importer = aspecd.io.AdfImporter()\n importer.source = filename\n importer.import_into(self)", "def __lazy_load(self, lazy_load_filename: str, allow_write: bool) -> None:\n self.__hdf5_store = h5py.File(lazy_load_filename, 'r+' if allow_write else 'r')\n self.version = self.__hdf5_store[\"version\"][()]\n try:\n if self.version == b\"unstable\" or self.version == \"unstable\":\n warnings.warn(\"Experiment file was 
created with development version of analysis code. Trying to \"\n \"load as version 1\")\n self.version = \"0\"\n elif int(self.version) > 2:\n self.__hdf5_store.close()\n self.__hdf5_store = None\n raise IOError(f\"File version {self.version} is larger than highest recognized version '2'\")\n except ValueError:\n self.__hdf5_store.close()\n self.__hdf5_store = None\n raise IOError(f\"File version {self.version} not recognized\")\n # load general experiment data\n n_planes = self.__hdf5_store[\"n_planes\"][()]\n self.experiment_name = self.__hdf5_store[\"experiment_name\"][()]\n self.original_path = self.__hdf5_store[\"original_path\"][()]\n self.scope_name = self.__hdf5_store[\"scope_name\"][()]\n self.comment = self.__hdf5_store[\"comment\"][()]\n self.tail_frame_rate = self.__hdf5_store[\"tail_frame_rate\"][()]\n # load singular parameter dictionary\n self.info_data = self._load_dictionary(\"info_data\", self.__hdf5_store)\n # load tail-data modification flag if this is version 2\n if int(self.version) > 1:\n self.tail_data_augmented = self.__hdf5_store[\"tail_data_augmented\"][()]\n # load some per-plane data leave larger objects unloaded\n for i in range(n_planes):\n plane_group = self.__hdf5_store[str(i)]\n self.scanner_data.append(self._load_dictionary(\"scanner_data\", plane_group))\n self.projections.append(plane_group[\"projection\"][()])\n if \"anat_projection\" in plane_group: # test if this experiment was dual-channel\n self.anat_projections.append(plane_group[\"anat_projection\"][()])\n if \"tail_data\" in plane_group: # test if this experiment had tail data (for all planes)\n self.bout_data.append(plane_group[\"bout_data\"][()])\n self.tail_frame_times.append(plane_group[\"tail_frame_time\"][()])\n self.all_centroids.append(plane_group[\"centroids\"][()])\n self.all_sizes.append(plane_group[\"sizes\"][()])\n self.all_spatial.append(plane_group[\"spatial\"][()])\n ps = plane_group[\"mcorr_dict\"][()]\n self.mcorr_dicts.append(json.loads(ps))\n ps = plane_group[\"cnmf_extract_dict\"][()]\n self.cnmf_extract_dicts.append(json.loads(ps))\n ps = plane_group[\"cnmf_val_dict\"][()]\n self.cnmf_val_dicts.append(json.loads(ps))\n # update lazy load stores for non-loaded objects\n self.__tail_data = LazyLoadObject(self.__hdf5_store, \"tail_data\")\n self.__replaced_tail_frames = LazyLoadObject(self.__hdf5_store, \"replaced_tail_frames\")\n self.__laser_data = LazyLoadObject(self.__hdf5_store, \"laser_data\")\n self.__all_c = LazyLoadObject(self.__hdf5_store, \"C\")\n self.__all_dff = LazyLoadObject(self.__hdf5_store, \"dff\")\n self.__func_stacks = LazyLoadObject(self.__hdf5_store, \"func_stack\")\n self.lazy = True", "def test_import_process(self):\r\n good_file = self._get_google_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._google_data_test()", "async def async_step_import(self, import_data: dict[str, str]) -> FlowResult:\n import_source = import_data.pop(\"import_source\")\n if import_source == \"geography_by_coords\":\n return await self.async_step_geography_by_coords(import_data)\n return await self.async_step_geography_by_name(import_data)", "def importShaders(self, namespace=':'):\n self.logger.info(\"Import Shaders\")\n\n if self.data['abcShadersAttr']:\n\n abcfile = self.data['abcShadersAttr']\n \n # shotgun query for maya file\n mayafile = find_shader_package_from_shader_file(file_path=abcfile, file_type='ma')\n if mayafile != {}:\n mayafile = mayafile['ma']\n self.logger.debug(\"Found maya 
shader file: %s\" % mayafile)\n else:\n localfile = abcfile.replace('.abc', '.ma')\n if os.path.isfile(localfile):\n mayafile = localfile\n self.logger.debug(\"Found maya shader file: %s\" % mayafile)\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n\n if os.path.isfile(mayafile):\n try: \n imported_shaders = cmds.file(mayafile, i=True, returnNewNodes=True, renameAll=True, mergeNamespacesOnClash=True, namespace=namespace)\n self.setAttr(\"abcShaders\", \"\")\n self.logger.debug(\"Imported under %s namespace\" % namespace)\n\n # reset selection back to alembicHolder\n cmds.select(self.data['shapeNode'])\n self.logger.info(\"Imported : %s\" % self.data['abcShadersAttr'])\n return True\n\n except Exception, e:\n self.logger.error(\"Import Json Error : %s\" % e)\n return False\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n else:\n self.logger.info(\"Empty attribute : %s.abcShadersAttr\" % self.data['shapeNode'])\n return False", "def test_import_process(self):\r\n good_file = self._get_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._chrome_data_test()", "def loadData():\n project_dir = \"/home/c/chandanchowdhury/Documents/CIS-833/CSSearch/indexer/\"\n\n index_file = \"index_file.pkl\"\n link_file = \"link_file.pkl\"\n\n index_data = loadPickle(project_dir+index_file)\n link_data = loadPickle(project_dir+link_file)\n\n return index_data, link_data", "def _setup_all_awardees():\n hpo_data = _prep_awardee_csv_data('tests/test-data/fixtures/awardees.csv')\n org_data = _prep_awardee_csv_data('tests/test-data/fixtures/organizations.csv')\n site_data = _prep_awardee_csv_data('tests/test-data/fixtures/sites.csv')\n dao = HPODao()\n #\n # Import HPO records\n #\n for column in range(0, len(hpo_data[0]) - 1):\n data = _convert_csv_column_to_dict(hpo_data, column)\n dao.insert(HPO(hpoId=column+1, displayName=data['Name'], name=data['Awardee ID'],\n organizationType=OrganizationType(data['Type']), isObsolete=ObsoleteStatus.ACTIVE))\n #\n # Import Organization records\n #\n with dao.session() as session:\n for column in range(0, len(org_data[0]) - 1):\n data = _convert_csv_column_to_dict(org_data, column)\n result = session.query(HPO.hpoId).filter(HPO.name == data['Awardee ID']).first()\n dao.insert(Organization(externalId=data['Organization ID'], displayName=data['Name'], hpoId=result.hpoId))\n #\n # Import Site records\n #\n with dao.session() as session:\n for column in range(0, len(site_data[0]) - 1):\n data = _convert_csv_column_to_dict(site_data, column)\n result = session.query(Organization.hpoId, Organization.organizationId).\\\n filter(Organization.externalId == data['Organization ID']).first()\n try:\n mayo_link_id = data['MayoLINK Client #']\n except KeyError:\n mayo_link_id = str(random.randint(7040000, 7999999))\n dao.insert(Site(siteName=data['Site'], googleGroup=data['Site ID / Google Group'].lower(),\n mayolinkClientNumber=mayo_link_id, hpoId=result.hpoId,\n organizationId=result.organizationId))", "def load_index_from_cache(self):\n cache = open(self.cache_path_index, 'r')\n json_index = cache.read()\n self.index = json.loads(json_index)", "def load_raw_data(apps, schema_editor):\n from season.import_raw_data import InitialDataProcessor\n matches_path = str(BASE_DIR) + '/season/migrations/matches.csv'\n deliveries_path = str(BASE_DIR) + '/season/migrations/deliveries.csv'\n # Initialization path to read data\n 
load_data = InitialDataProcessor(matches_path=matches_path, deliveries_path=deliveries_path)\n # transform data frame and save the data step by step\n # only support new season import for the first tym when data structure is ready to use\n load_data.transform_input_save()", "def _import_bh_(self):", "def ImportData(config):\n # -----------<<< Setting constant values that are to be used inside function >>>----------- #\n AccessDataFrom = config['DataCollection']['GetDataFrom']\n if AccessDataFrom == 'BQ':\n SettingToUse = config['IterationAim']['CycleType']\n if SettingToUse: GlTestDataSize = int(config['IterationAim']['GlTest_DataGrabWindow_Hr'])\n FileLocalSavingName = config['InputPaths']['BQ_RawDataStoringName'].format(SettingToUse)\n GetNewCopy = config['DomainConfig']['BQ_GetNewCopyOfData']\n elif AccessDataFrom == 'Storage':\n FileName = config['InputPaths']['Storage_RawData']\n else:\n print('Wrong setting in \"GetDataFrom\", current value is {}'.format(AccessDataFrom))\n txt = 'Exception: Wrong Configuration has been passed in \"GetDataFrom\".'\n AddRecommendation(txt, config)\n raise Exception(txt)\n LevBasedPrint('Inside \"'+ImportData.__name__+'\" function and configurations for this has been set.',1,1)\n \n \n LevBasedPrint('Accessing data from {}'.format(AccessDataFrom), 1)\n # ----------------------------<<< Accessing Data from BQ >>>------------------------------- #\n if AccessDataFrom == 'BQ':\n \n # -----------------------<<< Setting Configuration for GlTest >>>-------------------------- #\n if(SettingToUse == 'GlTest'):\n config['IfStatic']['DataGrabWindow_Days'] = str(int(GlTestDataSize/24 + 1))\n config['IfDynamic']['DataGrabWindow_Hr'] = str(GlTestDataSize + 1)\n\n # --------------------------<<< Get New Copy Of Data Or Reuse >>>-------------------------- #\n if (os.path.exists(FileLocalSavingName) == False) | (GetNewCopy in ['True', 'true', 'T', 't', 'Yes', 'yes', 'Y', 'y']):\n DF = GrabAnySizeDatafromGoogleBQ(config)\n # if(SettingToUse == 'GlTest'):\n # DF.drop(DF[DF.BinsBackFromCurrent != 'Bin_0'].index, inplace=True)\n # DF.reset_index(drop=True, inplace=True)\n DF.to_csv(FileLocalSavingName, index=False)#, sep='|', encoding='utf-8')\n LevBasedPrint('Data extracted from BQ and saved locally to the File: '+ FileLocalSavingName, 1)\n else:\n DF = pd.read_csv(FileLocalSavingName)#, sep='|', encoding='utf-8')\n LevBasedPrint('Data Loaded From the File: '+ FileLocalSavingName, 1)\n LevBasedPrint('Data Shape: '+str(DF.shape), 1 )\n # --------------------------<<< Accessing Data from Storage >>>---------------------------- #\n elif AccessDataFrom == 'Storage':\n DF = pd.read_csv(FileName)#, sep='|', encoding='utf-8')\n LevBasedPrint('Data Loaded From the File: '+ FileName, 1)\n \n # ---------------------------------------<<< xyz >>>--------------------------------------- #\n LevBasedPrint('Data Import | Complete',1)\n LevBasedPrint('',1,1)\n return DF\n # ------------------------------------------------------------------------------------------- #", "def load(a_path, a_data):\n analyzer = load(a_data)\n analyzer._load(a_path)\n return analyzer", "def _import(format, input, config):\n if input:\n with open(input, 'rb') as f:\n data = f.read()\n else:\n data = sys.stdin.read()\n\n dataset = tablib.Dataset()\n setattr(dataset, format, data)\n\n _add_changelogs(config, dataset.dict)", "def _load_meta(self, db, metadata, source_name) -> None:\n db.metadata.put_item(Item={\n 'src_name': source_name,\n 'data_license': metadata.data_license,\n 'data_license_url': 
metadata.data_license_url,\n 'version': metadata.version,\n 'data_url': metadata.data_url,\n 'rdp_url': metadata.rdp_url,\n 'data_license_attributes': metadata.data_license_attributes,\n 'genome_assemblies': metadata.genome_assemblies\n })", "def loadData(catalog):\n\n loadArtwork(catalog)\n loadArtists(catalog)", "def zoo_import(name, head=''):\n net = gz.get_model(name, pretrained=True)\n export_block(head + name, net, preprocess=True)", "def start_import(data_import):\n\tdata_import = frappe.get_doc(\"Data Import Beta\", data_import)\n\ti = Importer(data_import.reference_doctype, data_import=data_import)\n\treturn i.import_data()", "def import_data(self, reload = False):\n if self.verbose > 1:\n print(\"SpectraTools.Hitran.import_data()\") \n \n if reload:\n if self.verbose > 0:\n print(\"SpectraTools.Hitran.import_data(): downloading data (reload == True)\") \n self.import_data_helper()\n else:\n try:\n nu = hapi.getColumn(self.tablename, 'nu')\n if self.verbose > 1:\n print(numpy.amin(nu), numpy.amax(nu))\n if numpy.amin(nu) <= self.min_x or numpy.amax(nu) >= self.max_x: \n if self.verbose > 0:\n print(\"SpectraTools.Hitran.import_data(): downloading data (new range)\")\n self.import_data_helper()\n else:\n if self.verbose > 0:\n print(\"SpectraTools.Hitran.import_data(): no need to download data\") \n \n except KeyError:\n if self.verbose > 0:\n print(\"SpectraTools.Hitran.import_data(): downloading data (new data)\")\n self.import_data_helper()", "def load_from_datastore(archives=False):\n\n # This shouldn't happen often (should only happen when memcache has\n # been completely evicted), but we still want to be as fast as\n # possible.\n\n bingo_cache = BingoCache()\n\n if archives:\n # Disable cache writes if loading from archives\n bingo_cache.storage_disabled = True\n\n experiment_dict = {}\n alternatives_dict = {}\n\n # Kick both of these off w/ run() so they'll prefetch asynchronously\n experiments = _GAEBingoExperiment.all().filter(\n \"archived =\", archives).run(batch_size=400)\n alternatives = _GAEBingoAlternative.all().filter(\n \"archived =\", archives).run(batch_size=400)\n\n for experiment in experiments:\n experiment_dict[experiment.name] = experiment\n\n alternatives = sorted(list(alternatives), key=lambda alt: alt.number)\n\n for alternative in alternatives:\n if alternative.experiment_name not in alternatives_dict:\n alternatives_dict[alternative.experiment_name] = []\n alternatives_dict[alternative.experiment_name].append(alternative)\n\n for experiment_name in experiment_dict:\n ex, alts = (experiment_dict.get(experiment_name),\n alternatives_dict.get(experiment_name))\n if ex and alts:\n bingo_cache.add_experiment(ex, alts)\n\n # Immediately store in memcache as soon as possible after loading from\n # datastore to minimize # of datastore loads\n bingo_cache.store_if_dirty()\n\n return bingo_cache", "def import_data_model(directory):\n analyses = pd.read_excel(directory + 'analyses.xlsx')\n analytes = pd.read_excel(directory + 'analytes.xlsx')\n for index, analysis in analyses.iterrows():\n analyte_data = []\n analyte_names = analysis.analyte_keys.split(', ')\n for analyte_key in analyte_names:\n analyte_item = analytes.loc[analytes.key == analyte_key]\n analyte_data.append(analyte_item.to_dict(orient='records'))\n analyses.at[index, 'analytes'] = analyte_data \n analyses_data = analyses.to_dict(orient='records')\n for index, values in analyses_data.iterrows():\n doc_id = str(values.key)\n doc_data = values.to_dict()\n ref = ''\n update_document(ref, doc_data)\n # 
doc_data = data.to_dict(orient='index')\n # data_ref = create_reference(db, ref)\n # data_ref.document(doc_id).set(doc_data, merge=True)\n # data_ref.set(doc_data, merge=True)\n\n return NotImplementedError", "def tcga_read_data():\n print(\"Downloading data ...\")\n try:\n os.mkdir(cache_directory)\n except FileExistsError:\n pass\n \n brca_path= os.path.join(cache_directory, expression_matrix_name + \".tsv.gz\")\n brca_clin_path = os.path.join(cache_directory, phenotype_name + \".tsv.gz\")\n try:\n brca = pd.read_csv(brca_path, sep=\"\\t\",index_col=0)\n brca_clin = pd.read_csv(brca_clin_path, sep=\"\\t\",index_col=0)\n return brca, brca_clin\n except:\n pass\n\n # Download TCGA data\n tcga_path = os.path.join(cache_directory, tcga_name + \".tar.gz\")\n download_file_if_not_present(tcga_url, tcga_path)\n print(\"Tar-file inplace, extracting tables.\")\n\n # Decompress data into tables\n tf = tarfile.open(tcga_path)\n tf.extract(expression_name, cache_directory)\n tf.extract(clinical_name, cache_directory)\n\n # def get_expression_data(self, path, file):\n df = pd.read_csv(os.path.join(cache_directory, expression_name), sep=\"\\t\")\n df.dropna(axis=0, how='any', inplace=True)\n df.set_index('Entrez_Gene_Id', inplace=True)\n # df.drop(columns=['Unnamed: 0', 'Entrez_Gene_Id'], inplace=True)\n # df.drop(columns=['Entrez_Gene_Id'], inplace=True)\n df.drop(columns=['Hugo_Symbol'], inplace=True)\n brca = df.reindex(sorted(df.columns), axis=1)\n\n # get_clinical_data(brca_clin_path,\"data_clinical_sample.txt\")\n df = pd.read_csv(os.path.join(cache_directory, clinical_name), sep=\"\\t\").T\n df.columns = df.loc[\"Sample Identifier\"]\n df.drop(columns=[\"A unique sample identifier.\",\"STRING\",\"1\",\"SAMPLE_ID\"], inplace=True,errors='ignore')\n if 'TCGA-BH-A1ES-01' in df.columns:\n df.drop(columns=['TCGA-BH-A1ES-01'], inplace=True)\n df.drop(index=[\"Unnamed: 0\",\"#Patient Identifier\",\"Sample Identifier\",\"Other Sample ID\"], inplace=True,errors='ignore')\n brca_clin = df.reindex(sorted(df.columns), axis=1)\n\n brca_clin, brca_clin = tcga_tn_preprocess(brca, brca_clin)\n\n # Put the extracted matrixes to the file cashe, so we do not have to do this again if procedure is repeated.\n brca.to_csv(brca_path, sep=\"\\t\")\n brca_clin.to_csv(brca_clin_path, sep=\"\\t\")\n return brca, brca_clin", "def dataload():\n\t\n\tglobal A, B, fnA, fnB, lPcnA, lPcnB\n\t\n\tdwd = os.getcwd() # Data WD\n\t\t\n\t# First sample A is loaded. This is the \"calibrating\" sample.\n\t# In this case it is the OGLE III LMC small amplitude RGs.\n\t\n\tfnA = '/LMC-CalSample-cleaned_2.fits'\n\tA = Table.read(dwd+fnA)\n\n\t# Then sample B is loaded. For comparison/testing purposes, this is\n\t# again the OGLE III LMC SARGs.\n\t\n\tfnB = '/LMC-CalSample-cleaned_2.fits'\n\tB = Table.read(dwd+fnB)\n\t\n\t\"\"\" Fix tables so only the stars with all three good periods are \n\tconsidered. 
\"\"\"\n\t\n\tlPcnA = get_logPcn(A)\n\tlPcnB = get_logPcn(B)\n\t\n\tfor cn in lPcnA:\n\t\tA = A[A[cn]>0]\n\tfor cn in lPcnB:\n\t\tB = B[B[cn]>0]", "def import_aa_data(anime_list):\n for anime, atog in anime_list:\n db.session.add(anime)\n for genre in atog:\n db.session.add(genre)\n\n db.session.commit()", "def fixture_example_data():\n import_example_data()", "def setUp(self):\n client = utils.create_test_datastore_client()\n self.resource = import_attempt.ImportAttemptByID(client)\n list_resource = import_attempt_list.ImportAttemptList(client)\n run_list_resource = system_run_list.SystemRunList(client)\n attempts = [{\n _ATTEMPT.provenance_url: 'google.com'\n }, {\n _ATTEMPT.provenance_url: 'facebook.com'\n }, {\n _ATTEMPT.provenance_url: 'bing.com'\n }]\n self.attempts = utils.ingest_import_attempts(run_list_resource,\n list_resource, attempts)", "def _install(args, use_cache, debug):\n engine = choose_engine(args)\n engine.use_cache = use_cache\n\n if args['dataset'].endswith('.zip') or args.get('hash_value'):\n path_to_archive = args['dataset']\n if args.get('hash_value'):\n path_to_archive = os.path.join(\n PROVENANCE_DIR, args['dataset'],\n '{}-{}.zip'.format(args['dataset'], args['hash_value']))\n if not os.path.exists(path_to_archive):\n print('The committed file does not exist.')\n engine = install_committed(path_to_archive,\n engine,\n force=args.get('force', False))\n return engine\n script_list = SCRIPT_LIST()\n if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):\n check_for_updates()\n script_list = SCRIPT_LIST()\n data_sets_scripts = name_matches(script_list, args['dataset'])\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n print(\"=> Installing\", data_sets_script.name)\n try:\n if engine.name == \"HDF5\":\n sqlite_opts = {\n 'command': 'install',\n 'dataset': data_sets_script,\n 'engine': 'sqlite',\n 'file': (args[\"file\"].split(\".\"))[0] + \".db\",\n 'table_name': args[\"table_name\"],\n 'data_dir': args[\"data_dir\"]\n }\n sqlite_engine = choose_engine(sqlite_opts)\n data_sets_script.download(sqlite_engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n engine.script_table_registry = OrderedDict()\n data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n print(e)\n if debug:\n raise\n elif args['dataset'].startswith('socrata') and not data_sets_scripts:\n socrata_id = args['dataset'].split('-', 1)[1]\n resource = find_socrata_dataset_by_id(socrata_id)\n\n if \"error\" in resource.keys():\n if resource[\"datatype\"][0] == \"map\":\n print(\"{} because map type datasets are not supported\".format(\n resource[\"error\"]))\n else:\n print(\"{} because it is of type {} and not tabular\".format(\n resource[\"error\"], resource[\"datatype\"][1]))\n elif len(resource.keys()) == 0:\n return\n else:\n print(\"=> Installing\", args['dataset'])\n name = f\"socrata-{socrata_id}\"\n create_socrata_dataset(engine, name, resource)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n elif args['dataset'].startswith('rdataset') and not data_sets_scripts:\n print(\"=> Installing\", args['dataset'])\n rdataset = args['dataset'].split('-')\n update_rdataset_catalog()\n package, dataset_name = rdataset[1], rdataset[2]\n create_rdataset(engine, package, dataset_name)\n if args['command'] == 'download':\n return engine\n else:\n script_list = 
SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n else:\n message = \"Run retriever.datasets() to list the currently available \" \\\n \"datasets.\"\n raise ValueError(message)\n return engine", "def load(self):", "def prepare_anndata(anndata, batch_size, shuffle=False):\n # Add shuffling here\n if sparse.issparse(anndata.X):\n data = anndata.X.A\n else:\n data = anndata.X\n data = torch.Tensor(data)\n my_dataloader = torch.utils.data.DataLoader(data, shuffle=shuffle, batch_size=batch_size)\n return my_dataloader", "def load_data(self):\n df= self.read_file()\n for row,col in df.iterrows():\n Employeeid = int(col['Empolyeeid'])\n Employee_Name = col['Employee_Name']\n Age = col['Age']\n Salary = col['Salary']\n self.table.put_item(\n Item={\n \"Employeeid\":Employeeid,\n \"Employee_Name\": Employee_Name,\n \"Age\": Age,\n \"Salary\": Salary\n }\n )\n return True", "def load_data(self):\n super(SubjectRAMEventsData, self).load_data()\n\n # also load electrode info\n self.elec_info = ecog_helpers.load_elec_info(self.subject, self.montage, self.bipolar)", "def setup(self):\n self.rows = test_helpers.fetch_sample_teradata_rows()\n self.csv_path = 'not/a/real/path'", "def load_cache(animelist: List[AnimeListEntry]) -> Tuple[List[AnimeListEntry], Dict[int,AnimeThemeAnime], Dict[Tuple[AnimeListSite,int],Tuple[str,int]]]:\n current_time = time.time()\n logger.debug(f'Loading animethemes data from {CACHEFILE}')\n animelist = {(site,alid):title for title,alid,site in animelist}\n \n with open(CACHEFILE) as file:\n data = json.load(file)\n animethemes = sorted(data['anime'], key=lambda x:x['_fetched_at'])\n unavalible = {(i['site'],i['alid']):(i['title'],i['fetched_at']) for i in data['unavalible_anime']}\n\n for anime in animethemes:\n for resource in anime['resources']:\n if (resource['site'],resource['external_id']) in animelist and current_time-anime['_fetched_at'] <= OPTIONS['download']['max_cache_age']:\n del animelist[resource['site'],resource['external_id']]\n \n for (site,alid),(title,fetched_at) in unavalible.items():\n if (site,alid) in animelist and current_time-fetched_at <= OPTIONS['download']['max_cache_age']:\n del animelist[site,alid]\n \n animelist = [(title,alid,site) for (site,alid),title in animelist.items()]\n animethemes = {anime['id']:anime for anime in animethemes}\n \n return animelist,animethemes,unavalible", "def __init__(self, path):\n self._path = path\n self._store = pd.HDFStore(path, \"r\")\n self._user_ids = self._load_user_ids()\n self._loads = dict()", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def loadData(self,ins):\n raise AbstractError", "def load_housing_data(housing_path=HOUSING_PATH):\n logging.info(\"Loads housing data.....\")\n csv_path = os.path.join(housing_path, \"housing.csv\")\n return pd.read_csv(csv_path)", "def _import(self, datadict):\n self.GUID = datadict.get(\"GUID\", uuid.uuid1())\n self.FileName = datadict.get(\"FileName\", \"\")\n self.Name = datadict.get(\"Name\", \"\")\n self.Projects = datadict.get(\"Projects\", [])\n self.VSVersion = datadict.get(\"VSVersion\", None)", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)\n loadAdquires(catalog)\n loadNacionalities(catalog)\n load2DArtworks(catalog)\n loadArtistMediumsTags(catalog)\n loadDptments(catalog)\n catalog['artists'] = sortArtists(catalog, 3)\n fillArtistMediums(catalog)\n fillMostUsedMediums(catalog)\n catalog['artists_tags'] = 
sortArtistTags(catalog, 3)\n sort_dptments(catalog)", "def _json_import(self, imppath):\n # TODO: Settle on JSON format for colortable\n pass", "def samples_from_sfei_erddap(run_start,cache_dir=None):\n if cache_dir is None:\n cache_dir=common.cache_dir\n \n dt_str=utils.to_datetime(run_start).strftime('%Y%m%d%H%M')\n if cache_dir is not None:\n my_cache_dir=os.path.join(cache_dir,'enviz_erddap')\n os.path.exists(my_cache_dir) or os.mkdir(my_cache_dir)\n\n cache_fn=os.path.join(my_cache_dir,\"temp_salt-%s.csv\"%dt_str)\n print(\"Cache fn: %s\"%cache_fn)\n else:\n cache_fn=None\n\n if cache_fn is not None and os.path.exists(cache_fn):\n csv_data=cache_fn\n else:\n # Fetch data before/after run_start by this much\n pad=np.timedelta64(30*60,'s')\n fetch_period=[run_start-pad,run_start+pad]\n fetch_strs=[ utils.to_datetime(p).strftime('%Y-%m-%dT%H:%M:00Z')\n for p in fetch_period ]\n\n # Because the table in ERDDAP is stored by sample, there is not guarantee that\n # times are increasing. That makes access via opendap inefficient, so instead\n # specify the query to ERDDAP more directly, and grab CSV for easier human\n # readability\n\n # choose dataset\n base_url=\"http://sfbaynutrients.sfei.org/erddap/tabledap/enviz_mirror.csv\"\n # choose fields to download\n params=\",\".join( ['stationcode','time','spcond_uS_cm','temp_C','stationname',\n 'latitude','longitude'] )\n # And the time range\n criteria=\"time%%3E=%s&time%%3C=%s\"%tuple(fetch_strs)\n url=base_url + \"?\" + params + \"&\" + criteria\n\n import requests\n logging.info(\"Fetching SFEI data from %s\"%url)\n resp=requests.get(url)\n\n if cache_fn is not None:\n with open(cache_fn,'wt') as fp:\n fp.write(resp.content.decode())\n csv_data=cache_fn\n else:\n csv_data=six.StringIO(resp.content.decode())\n \n\n # 2nd row of file has units, which we ignore.\n df=pd.read_csv(csv_data,skiprows=[1],parse_dates=['time'])\n\n # Could get fancier and choose the closest in time reading, or\n # interpolate. But this is not too bad, averaging over a total of\n # 1 hour.\n dfm=df.groupby('stationcode').mean()\n\n # Get salinity from specific conductance\n import seawater as sw\n # specific conductance to mS/cm, and ratio to conductivityt at 35 psu, 15 degC.\n # Note that mooring data comes in already adjusted to \"specific conductance\n # in uS/cm at 25 degC\"\n rt=dfm['spcond_uS_cm'].values/1000. 
/ sw.constants.c3515\n dfm['salinity']=sw.sals(rt,25.0)\n\n ll=np.c_[dfm.longitude.values,dfm.latitude.values]\n xy=proj_utils.mapper('WGS84','EPSG:26910')(ll)\n\n xys=np.c_[xy,dfm['salinity'].values]\n valid=np.isfinite(xys[:,2])\n return xys[valid,:]", "def _load_test_data(self):\n self._save_test_data()", "def _load_data(self):\n path = os.path.join(self._cache_path, '%s.data' % self._name)\n\n if not os.path.exists(path):\n raise IOError('Data cache missing at %s' % path)\n\n f = bz2.BZ2File(path)\n data = pickle.loads(f.read())\n f.close()\n\n return data", "def load(self, filename):\n aead_f = open(filename, \"rb\")\n buf = aead_f.read(1024)\n if buf.startswith(YHSM_AEAD_CRLF_File_Marker):\n buf = YHSM_AEAD_File_Marker + buf[len(YHSM_AEAD_CRLF_File_Marker):]\n if buf.startswith(YHSM_AEAD_File_Marker):\n if buf[len(YHSM_AEAD_File_Marker)] == chr(1):\n # version 1 format\n fmt = \"< I %is\" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)\n self.key_handle, self.nonce = struct.unpack_from(fmt, buf, len(YHSM_AEAD_File_Marker) + 1)\n self.data = buf[len(YHSM_AEAD_File_Marker) + 1 + struct.calcsize(fmt):]\n else:\n raise pyhsm.exception.YHSM_Error('Unknown AEAD file format')\n else:\n # version 0 format, just AEAD data\n self.data = buf[:pyhsm.defines.YSM_MAX_KEY_SIZE + pyhsm.defines.YSM_BLOCK_SIZE]\n aead_f.close()", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self, django_user):\n LOAD_HQL = \\\n \"\"\"\n LOAD DATA INPATH\n '%(filename)s' OVERWRITE INTO TABLE %(tablename)s\n \"\"\"\n\n hdfs_root_destination = self._get_hdfs_root_destination(django_user)\n hdfs_file_destination = self._upload_to_hdfs(django_user, self._contents_file, hdfs_root_destination)\n\n hql = LOAD_HQL % {'tablename': self.name, 'filename': hdfs_file_destination}\n LOG.info('Running load query: %s' % hql)\n self._load_data_to_table(django_user, hql, hdfs_file_destination)", "def __init__(self, filename = None, dbalias = None, smkey = None ):\n super(HLTJobOptionsAccess,self).__init__( ConfigType.HLTJO, mainkey = \"properties\",\n filename = filename, dbalias = dbalias, dbkey = smkey )\n self.loader.setQuery([\n \"SELECT JO.HJO_DATA FROM {schema}.SUPER_MASTER_TABLE SMT, {schema}.HLT_JOBOPTIONS JO WHERE JO.HJO_ID=SMT.SMT_HLT_JOBOPTIONS_ID AND SMT.SMT_ID={dbkey}\", # for new db schema\n \"SELECT JO.JO_CONTENT FROM {schema}.SUPER_MASTER_TABLE SMT, {schema}.JO_MASTER_TABLE JO WHERE JO.JO_ID=SMT.SMT_JO_MASTER_TABLE_ID AND SMT.SMT_ID={dbkey}\" # for current db schema\n ])\n self.load()", "def loader():\n bucket = data_load_variables[\"bucket\"]\n\n if data_load_variables[\"use_lite_dataset\"]:\n dataset_name = data_load_variables[\"lite_dataset_name\"]\n else:\n dataset_name = data_load_variables[\"dataset_name\"]\n\n s3 = boto3.client('s3')\n\n obj = s3.get_object(Bucket=bucket, Key=dataset_name)\n # get object and file (key) from bucket\n\n df = pd.read_csv(obj['Body'])\n return df", "def import_file(self):\n if self.session_filename is None:\n return\n\n try:\n with open(self.session_filename, \"rb\") as f:\n data = cPickle.loads(zlib.decompress(f.read()))\n except (IOError, zlib.error, cPickle.UnpicklingError):\n return\n\n # update the skip variable to pick up fuzzing from last test case.\n self._index_start = data[\"total_mutant_index\"]\n self.session_filename = data[\"session_filename\"]\n self.sleep_time = data[\"sleep_time\"]\n self.restart_sleep_time = data[\"restart_sleep_time\"]\n self.restart_interval = data[\"restart_interval\"]\n self.web_port = 
data[\"web_port\"]\n self._crash_threshold_node = data[\"crash_threshold\"]\n self.total_num_mutations = data[\"total_num_mutations\"]\n self.total_mutant_index = data[\"total_mutant_index\"]\n self.netmon_results = data[\"netmon_results\"]\n self.procmon_results = data[\"procmon_results\"]\n self.is_paused = data[\"is_paused\"]", "def loadArnoldAsset(self, *args):\n asset = OL.loadStandIn(self.name)\n return asset" ]
[ "0.5765304", "0.5686181", "0.56845134", "0.56063354", "0.56054366", "0.55914676", "0.5575657", "0.55459553", "0.5527377", "0.54661703", "0.5434593", "0.54312104", "0.5410122", "0.5381183", "0.5379974", "0.5346656", "0.5326773", "0.5326303", "0.52563024", "0.52543855", "0.5239716", "0.5209885", "0.51924276", "0.51161706", "0.51098263", "0.51079005", "0.5107688", "0.50871587", "0.50703907", "0.506887", "0.5059597", "0.5050651", "0.50405926", "0.50365597", "0.501404", "0.50093126", "0.50080425", "0.5004013", "0.5004013", "0.50009865", "0.5000942", "0.4997321", "0.49683827", "0.49682346", "0.49670303", "0.49661985", "0.49654675", "0.49622762", "0.49603948", "0.49440905", "0.49176237", "0.49149728", "0.49131954", "0.49082994", "0.49074167", "0.48987368", "0.4895396", "0.48914018", "0.4882067", "0.48734877", "0.48694992", "0.48667347", "0.48601016", "0.48585695", "0.48572272", "0.4844953", "0.48353326", "0.48092744", "0.48079827", "0.48054355", "0.48024082", "0.47802135", "0.47781566", "0.47753826", "0.47714597", "0.47685534", "0.4768176", "0.4768085", "0.47659516", "0.47641265", "0.476238", "0.47611743", "0.47491363", "0.47471812", "0.47467604", "0.47234812", "0.4722008", "0.471196", "0.47104526", "0.47097263", "0.4708953", "0.47078922", "0.47078922", "0.47078922", "0.47078922", "0.47055906", "0.47042486", "0.47029778", "0.4702726", "0.47023398" ]
0.61214995
0
Return the number of timestamps missing
def count_missing_timestamps(df):
    no_of_timestamps = len(df.timestamp)
    no_of_sites = len(set(df.site_id))
    full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H')
    no_of_missing_timestamps = no_of_sites * len(full_date_range) - no_of_timestamps
    print(f'There are {no_of_timestamps} timestamps in the data. The full date range is {len(full_date_range)} long and'
          f' there are {no_of_sites} sites so there should be {no_of_sites * len(full_date_range)} '
          f'timestamps in the data. There are therefore {no_of_missing_timestamps} missing. ')
    return no_of_missing_timestamps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_no_missing_timesteps(timesteps, verbose=True):\n timesteps = _check_timesteps(timesteps)\n # Check if there are data\n if timesteps.size == 0:\n raise ValueError(\"No data available !\")\n # Check if missing timesteps\n dt = np.diff(timesteps)\n dts, counts = np.unique(dt, return_counts=True)\n if verbose:\n print(\" --> Starting at\", timesteps[0])\n print(\" --> Ending at\", timesteps[-1])\n if len(counts) > 1:\n print(\"Missing data between:\")\n bad_dts = dts[counts != counts.max()]\n for bad_dt in bad_dts:\n bad_idxs = np.where(dt == bad_dt)[0]\n bad_idxs = [b.tolist() for b in bad_idxs]\n for bad_idx in bad_idxs:\n tt_missings = timesteps[bad_idx : (bad_idx + 2)]\n print(\"-\", tt_missings[0], \"and\", tt_missings[1])\n raise ValueError(\"The process has been interrupted\")\n return", "def calculateMissing(odf):\n df = odf.copy()\n # Calculate last minute of operation for each day in `df`\n df.loc[:, 'time'] = np.nan\n df.loc[:, 'time'] = df.index.astype(np.int64)//10**9 # (to unix timestamp) from nano seconds 10*9 to seconds\n days = df.groupby(df.index.date)['time'].agg(['min', 'max', 'count']) # aggreagate on groupby\n # total number of minutes on the day\n totalminday = (days['max']-days['min'])//60\n # minutes with data by day\n countminday = days['count'] # -1 due count is +1\n missminday = totalminday-countminday\n percmissminday = missminday/totalminday\n\n # print('not working on daemon just on jupyter notebook!!!')\n return np.mean(percmissminday) # average of missing minutes", "def test_count_when_data_is_not_present(self):\n\n temp_data = []\n\n tt = TemperatureTracker()\n result = tt.count_from(temp_data)\n self.assertEqual(result, 0)", "def __len__(self):\n if self.first_timestamp is None or self.last_timestamp is None:\n return 0\n return int(\n (self.last_timestamp - self.first_timestamp).total_seconds()\n ) // self.interval + 1", "def timestamp_length(self) -> int:\n timestamps = self.timestamps_sorted_list()\n base_length = computation.num_digits(timestamps[0]) if len(timestamps) > 0 else -1\n indexes = [1, 2, 3, 4, 5, -1, -2, -3, -4] if len(timestamps) > 10 else list(range(1, len(timestamps)))\n for n in indexes:\n length = computation.num_digits(timestamps[n])\n if length != base_length:\n return -1\n return base_length", "def N(self):\n return len(self.time)", "def count_missing_stats(manifest):\n num_missing = 0\n for element in manifest:\n if element.missing_stats():\n num_missing += 1\n return num_missing", "def test_load_points_times_length():\n df = leiap.get_points_times(warn='disable')\n assert df.shape[0] > 0", "def get_missing(self):\n return self.serie.isna().sum()", "def test_timestamp_spacing_one_missing(times):\n assert_series_equal(\n time.spacing(times[[0, 2, 3]], times.freq),\n pd.Series([True, False, True], index=times[[0, 2, 3]])\n )", "def count_placeholders(series):\n count = 0\n\n for i in range(series.size-1, -1, -1):\n if pd.isnull(series[i]) or series[i] == 0:\n count += 1\n else:\n break\n\n return count", "def get_missing_test_numbers(a_dict, logged_test_numbers):\n tnum_list = []\n for tname, t_dict in a_dict.iteritems():\n for tpin, r_dict in t_dict.iteritems():\n tnum_list.append(int(r_dict[\"Test number\"]))\n missing = list(set(tnum_list) - set(logged_test_numbers))\n return missing, tnum_list", "def test_timestamp_not_found(self, l):\n extract_columns(data=self.data, columns=['a'], timestamps=['timestamp'])\n l.check(\n ('pynts.util', 'WARNING', \"Couldn't find timestamps '['timestamp']' in data, using 'ts' 
instead\"),\n )", "def count(timeseries):\n try:\n return timeseries[0].points[0].value.int64_value\n except (IndexError, AttributeError) as exception:\n LOGGER.warning(\"Couldn't find any values in timeseries response\")\n LOGGER.debug(exception)\n return 0 # no events in timeseries", "def get_naive_size(self) -> int:\n return (self.triples.time_end - self.triples.time_begin + 1).sum()", "def missing_values():\n print('Missings in the train data:', train_data.isnull().sum())", "def missing_reg(self):\n keys = []\n values = []\n count = [0] * 24\n\n for hour in self.data_file.buckets:\n for i in range(len(self.data_file.buckets[hour])):\n data_pt = self.data_file.buckets[hour][i]\n if data_pt['type'] == 'slow':\n time_before = self.data_file.buckets[hour][i - 1]['timestamp']\n time_slow = self.data_file.buckets[hour][i]['timestamp']\n if i != len(self.data_file.buckets[hour]) - 1:\n time_after = self.data_file.buckets[hour][i + 1]['timestamp']\n missing_reg_interval(keys, values, time_before, time_after, hour)\n else:\n missing_reg_interval(keys, values, time_before, time_slow, hour)\n if (time_slow - time_before) / float(Config.BOUNDARY) > 1:\n count[hour] += round((time_slow - time_before) / float(Config.BOUNDARY))\n missing_regular = dict(zip(keys, values))\n\n logger.info(f\"missing regular due to slow updates per hour: {count}\")\n logger.info(f\"missing regular due to slow updates: {missing_regular}\")\n logger.info(f\"total missing regular due to slow updates: {sum(count)}\")\n Config.ANALYSIS.write(\"\\n\")\n return missing_regular", "def test_count_when_data_present(self):\n temp_data = [(1.00, time.localtime()), (2.00, time.localtime()),\n (3.00, time.localtime()), (4.00, time.localtime())]\n\n tt = TemperatureTracker(temp_data)\n result = tt.count_from(temp_data)\n self.assertEqual(result, 4)", "def get_missing(self):\n missing_values = self.df[self.col_name].isnull().sum()\n return missing_values", "def test_null_count(self):\n\n ld = Lambdata(self.df)\n num_nulls = ld.null_count()\n self.assertEqual(num_nulls, 3)", "def find_missing(nums):\n # calculate sum of all elements\n # in input list\n sum_of_elements = sum(nums)\n\n # There is exactly 1 number missing\n n = len(nums) + 1\n actual_sum = (n * (n + 1)) / 2\n return actual_sum - sum_of_elements", "def test_files_missing():\n\tfiledir = \"./goes_files/%Y_events/%Y%m%devents.txt\"\n\tt0 = timerange.start.datetime\n\tdays = [t0]\n\twhile timerange.end.datetime > t0:\n\t\tt0 = t0 + relativedelta(days=1)\n\t\tdays.append(t0)\n\n\tmissing_files = []\n\tfor d in days:\n\t\tif not os.path.exists(d.strftime(filedir)):\n\t\t\tmissing_files.append(d.strftime(filedir))\n\tprint(missing_files)", "def num_failures(self):\n min_time = time.time() - self.window\n\n while self.failures and self.failures[0] < min_time:\n self.failures.popleft()\n\n return len(self.failures)", "def test_time_supp_length_matches_no_timesteps(self):\n for no_timesteps in [5, 578, 993, 300072]:\n for dt in [0.1, 0.5, 3.0]:\n test_rec = rt.Recording(np.empty([6, no_timesteps, 1]), dt=dt)\n self.assertEqual(\n len(test_rec.time_supp),\n no_timesteps,\n 'Expected length of time_supp {} to match no_timesteps of '\n 'input {}.'.format(len(test_rec.time_supp), no_timesteps),\n )", "def secondsCount(timestamp1, timestamp2):\n return timestamp1 - timestamp2", "def _checkDT(self):\r\n dt = np.diff(self.tsec)\r\n \r\n dt_unique = np.unique(dt)\r\n \r\n if np.size(dt_unique) == 1:\r\n self.isequal = True\r\n else:\r\n self.isequal = False\r\n \r\n try:\r\n self.dt = 
dt[1]\r\n except:\r\n self.dt = 0.0", "def reportnulls(self):\n self.null_counts = self.df.isnull().sum().sort_values(ascending=False)\n\n # return count of null values\n return self.null_counts", "def check_trial_length(data, **_):\n # NaN values are usually ignored so replace them with Inf so they fail the threshold\n metric = np.nan_to_num(data[\"feedback_times\"] - data[\"goCue_times\"], nan=np.inf)\n passed = (metric < 60.1) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed", "def test_timestamp_spacing_too_frequent(times):\n assert_series_equal(\n time.spacing(times, '30min'),\n pd.Series([True] + [False] * (len(times) - 1), index=times)\n )", "def __len__(self):\n return len(self._timeseriesData)", "def null_count(df):\n return df.isnull().sum().sum()", "def count(time):\n \n return len(events(time))", "def find_missing_lsoas():\n\n df = pd.read_excel(os.path.join(census_data_fpath,\"london\",\"deprivation_london.xls\"),sheet_name=\"Sub domains\")\n lsoas_all = df[\"LSOA code (2011)\"].tolist()\n lsoas = dill.load(open(os.path.join(output_path_files,\"mobility\",\"antenna_lsoa_london_only.dill\"),\"rb\"))\n print len(set(lsoas.values()))\n temp = list(set(lsoas_all) - set(lsoas.values()))\n print 'total number of lsoas: {0}'.format(len(lsoas_all))\n print \"number of missing lsoas: {0} {1}\".format(len(temp), float(len(temp))/len(lsoas_all))", "def current_missing(**kwargs) -> int:\n data_path = os.environ.get(BBG_ROOT, '').replace('\\\\', '/')\n if not data_path: return 0\n return len(files.all_files(f'{data_path}/Logs/{missing_info(**kwargs)}'))", "def checkMissing(data):\r\n N, M = data.shape\r\n columns = data.columns\r\n for col in columns:\r\n nMissing = data[col].isnull().sum()\r\n if nMissing:\r\n print(\"{} has {:d} missing values, {:.2f}%\".format(col, nMissing, nMissing/N*100))\r\n return", "def test_003_not_enough_datetimes() -> None:\n df = generate_test_data()\n df = df.head(2)\n skim(df)", "def detect_time_gaps(st, min_samples=10, epsilon=1e-20, thresh_disc=100):\n # Read data\n tdata = st[0].data\n indz = np.where(abs(tdata) < epsilon)[0] # indices where we have 0\n diff_indz = indz[min_samples:] - indz[0:-min_samples] # Need min_samples consecutive samples with 0's to identify as time gap\n ind_des = np.where(diff_indz == min_samples)[0] # desired indices: value is equal to min_samples in the time gap\n ind_gap = indz[ind_des] # indices of the time gaps\n gap_start_ind = []\n gap_end_ind = []\n if (0 == len(ind_gap)): \n num_gaps = 0\n else:\n print \"Warning: %s time gap(s) with zeros found\"%len(ind_gap)\n # May have more than 1 time gap\n ind_diff = np.diff(ind_gap) # discontinuities in indices of the time gaps, if there is more than 1 time gap\n ind_disc = np.where(ind_diff > thresh_disc)[0]\n # N-1 time gaps\n curr_ind_start = ind_gap[0]\n for igap in range(len(ind_disc)): # do not enter this loop if ind_disc is empty\n gap_start_ind.append(curr_ind_start)\n last_index = ind_gap[ind_disc[igap]] + min_samples\n gap_end_ind.append(last_index)\n curr_ind_start = ind_gap[ind_disc[igap]+1] # update for next iteration\n # Last time gap\n gap_start_ind.append(curr_ind_start)\n gap_end_ind.append(ind_gap[-1] + min_samples)\n num_gaps = len(gap_start_ind)\n\n return [num_gaps, gap_start_ind, gap_end_ind]", "def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num / den, 2)", "def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n 
return round(num / den, 2)", "def numNotready(antReady) :\n return len(antReady.notready)", "def valid_clocks(self) -> int:\n pass", "def n_timesteps(self) -> int:\n return len(self.time)", "def data_flow_null_count(self) -> int:\n return self.graph_count - int(\n self.graph_tuple_stats.data_flow_steps_count or 0\n )", "def _check_start_timestamp(self):\n if self.descriptor.type in (\n metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64,\n metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE,\n metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION,\n ):\n for ts in self.time_series:\n if ts.start_timestamp is None:\n raise ValueError(\"time_series.start_timestamp must exist \"\n \"for cumulative metrics\")", "def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count", "def find_missing_seat():\n seats = get_seats()\n sorted_seats = _sorted_seat_ids(seats)\n\n missing = []\n for i in range(len(sorted_seats)):\n seat = sorted_seats[i]\n if (i + 1) == len(sorted_seats):\n continue\n if not sorted_seats[i + 1] == seat + 1:\n missing.append(seat + 1)\n\n return missing", "def get_pts(dat):\n return np.where(np.abs(np.diff(dat)) > 0)[0]+1", "def n_peaks_valleys(x):\n diff_sign = np.sign(x[1:] - x[:-1])\n return np.count_nonzero(diff_sign[1:] != diff_sign[:-1])", "def test_number_of_nulls(self):\n self.assertEqual(em.number_of_nulls(self.test_df), 3)", "def count_missing(a_dataframe):\n missing_count = []\n for category in a_dataframe.dtypes.keys():\n try:\n np.mean(a_dataframe.loc[:, category]) # skipping numeric\n except TypeError:\n count = sum(a_dataframe.loc[:, category] == \" ?\")\n if count > 0:\n missing_count.append((category, count))\n\n return missing_count", "def spacing(times, freq):\n if not isinstance(freq, pd.Timedelta):\n freq = pd.Timedelta(freq)\n delta = times.to_series().diff()\n # The first value will be NaT, replace it with freq so the first\n # timestamp is considered valid.\n delta.iloc[0] = freq\n return delta == freq", "def number_timestamps(self):\n if self._number_timestamps:\n return self._number_timestamps\n\n max_ = -1\n\n for task in self.tasks.values():\n for job in task.jobs.values():\n if job.activation_ts > max_:\n max_ = job.activation_ts\n if job.deadline_ts > max_:\n max_ = job.deadline_ts\n for start in job.starts:\n if start['timestamp'] > max_:\n max_ = start['timestamp']\n for finish in job.finishes:\n if finish['timestamp'] > max_:\n max_ = finish['timestamp']\n\n self._number_timestamps = max_\n return self._number_timestamps", "def missing_values_ratio(series: TimeSeries) -> float:\n\n return series.pd_dataframe().isnull().sum().mean() / len(series)", "def check_missing_values(col):\n return np.sum(np.isnan(col))", "def count_null(self): \n print('Null Counts:', self.X.isnull().sum()[self.X.isnull().sum() > 0])", "def numberOfEvents(self):\r\n if self.events == None:\r\n raise Exception('event detector has not been called for a timeseries')\r\n return len(self.events)", "def count_nonzero(a):\n return (np.count_nonzero(a))", "def calc_null(self):\n null = 0\n for x in range(0, self.tot_col):\n for y in range(1, self.tot_rows + 1):\n if self.file_list[y][x].lower() == 'null':\n null += 1\n print('Total number of null fields: ' + str(null))\n results.append('Total number of null fields: ' + str(null))", "def test_bad_time_repeat(self):\n repeated = np.concatenate([np.repeat(self.times[0], 3),\n self.times[3:]])\n self.assertFalse(utils.check_timestamps(repeated))", "def 
test_empty_dates(self):\n\n for func in self.main_functions:\n self.assertFalse(func(20190120, 20130201).size)", "def check_empty_table(spark, df):\n return df.count()", "def __len__(self):\n return len(self.dates)", "def num_of_values(self):\n return len(self.data.dropna().unique())", "def find_nan_in_fits():\n # get nan values\n mynans = []\n for i in range(1000):\n dat = getdata('stamp_0/stamp_%d.fits.gz' % i)\n mysum = np.sum(dat)\n #print(mysum)\n if np.isnan(mysum):\n mynans.append(i)\n print('stamp_%d/stamp_%d.fits.gz ' % (i,k) , 'has sum = ', mysum)\n \n return mynans", "def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num/den*100, 2)", "def _has_timestamp_duplicates(df):\n df = df.copy()\n try:\n dup_mask = df.duplicated(subset=[TIME], keep=False)\n except KeyError:\n df = df.reset_index()\n dup_mask = df.duplicated(subset=[TIME], keep=False)\n return dup_mask.sum() > 0", "def missing_values_stats(train_data):\n bins = numpy.linspace(0, 1, 11)\n missing_ratios = _get_missing_ratios(train_data)\n digitized = numpy.digitize(missing_ratios, bins, right=True)\n missing_stats = [len(digitized[digitized == i]) for i in range(len(bins))]\n bins = numpy.insert(bins, 0, -1)\n stats = []\n stats.append(\"missing values stats:\")\n for (index, stat) in enumerate(missing_stats):\n start = \"{:.1f}\".format(bins[index])\n end = \"{:.1f}\".format(bins[index + 1])\n stats.append(f\"missing ratio: ({start}, {end}] {stat}\")\n return stats", "def count_sign_changes():\n numzero = 0\n for i in xrange(length):\n if frames[i] == 0:\n numzero += 1\n numzero /= 3 # 3 seconds\n numzero /= 2\n return numzero", "def missing_integer_simple(l):\n n = len(l)-1\n expected = n*(n+1)/2\n found = 0\n\n for num in l:\n if num is not None:\n found += num\n\n print(expected-found)", "def checkNaN(data):\n if data.isnull().values.any():\n N = data.isnull().sum().sum()\n print(\"There are {} missing values.\".format(N))", "def length(self):\n length = 0\n a = self.array_form\n for i in xrange(len(a)):\n if a[i] != i:\n length += 1\n return length", "def get_event_count(event_times, start, end):\n mask = (event_times > start) & (event_times <= end)\n return event_times[mask].size", "def get_length(self) -> int:\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()", "def __look__missing_termcount_info(self):\n logging.debug('Starting method that looks for missing Term Count data.')\n counter = 0\n max_vids_to_process = self.num_vids_to_use\n logging.info('Examining ' + str(max_vids_to_process) + ' records.')\n list_vids_no_tc_data = []\n percent_tracker = PercentTracker(max_vids_to_process, int_output_every_x_percent=10)\n for vid_id in self.transcripts_ds:\n execution_should_continue = self.var_mgr.var_retrieve(my_globals.str_execution_may_go_on)\n if (not execution_should_continue) or (counter >= max_vids_to_process):\n break\n transcript = Transcript(vid_id)\n transcript.set_transcript_directory(self.str_path_to_transcripts_files)\n transcript.load_transcript_object_from_dictionary(self.transcripts_ds.fetch_data(vid_id))\n has_tc_data = transcript.is_termcount_filename_populated()\n if not has_tc_data:\n # we are here if the video has a transcript (it exists in the transcripts SimpleDS),\n # but the field for the filename of the Term Count file has never been populated.\n list_vids_no_tc_data.append(vid_id)\n counter += 1\n percent_tracker.update_progress(counter,\n str_description_to_include_in_logging='Finding missing 
term-count files.')\n return list_vids_no_tc_data", "def _error_count(cls, samples: Samples) -> int:\n return cls.__sample_count(samples, \"false\")", "def missing_data_amounts():\n\n return [2]", "def missing_reg_interval(keys, values, time_before, time_after, hour):\n if is_dropped(time_after, time_before):\n keys.append((time_before, time_after))\n values.append((time_after - time_before, hour))\n Config.ANALYSIS.write(f\"{(time_before, time_after)}: {(time_after - time_before, hour)}, \"\n f\"{round((time_after - time_before) / float(Config.BOUNDARY) - 0.5)} \"\n f\"possible missing regulars\\n\")", "def test_count_day_frames(self):\n X = [\n (\"2019-12-21\", 1, \"2019-12-21\"),\n (\"2020-01-23\", 3, \"2020-02-04\"),\n (\"2020-02-03\", 1, \"2020-02-03\"),\n (\"2020-02-08\", 1, \"2020-02-08\"),\n (\"2020-02-08\", 1, \"2020-02-09\"),\n (\"2020-02-08\", 2, \"2020-02-10\"),\n (\"2020-05-01\", 20, \"2020-06-01\"),\n ]\n\n for i, (s, expected, e) in enumerate(X):\n logger.info(\"testing %s\", X[i])\n # cause 130 ± 3.34 µs, 130e-6 seconds\n actual = tf.count_day_frames(\n arrow.get(s, \"YYYY-MM-DD\").date(), arrow.get(e, \"YYYY-MM-DD\").date()\n )\n self.assertEqual(expected, actual)", "def count_lonely_sleeps(heartbeats, sleeps, delta=timedelta(minutes=30)):\n counter = 0\n lo = 0\n for sleep in sleeps:\n pos = bisect.bisect(heartbeats, sleep, lo=lo)\n lo = pos\n if not ((pos-1 >= 0 and (sleep-heartbeats[pos-1]) < delta)\n or \n (pos < len(heartbeats) and (heartbeats[pos]-sleep) < delta)):\n counter += 1\n return counter", "def getHits(self, timestamp: int) -> int:\n start = timestamp - 300\n while self.arr and self.arr[0] <= start:\n self.arr.popleft()\n self.length -= 1\n return self.length", "def test_no_source_measurements(self):\n measurement = self.measurement(self.metric())\n self.assertEqual(None, measurement[\"count\"][\"value\"])", "def timerCount(cmds):\n return int(sum(np.asarray(cmds) == 0x400001)) # numpy version\n #return cmds.count(0x400001) # python list version", "def numel(self):\n return self.t.size", "def check_miss_count(self):\n first = self.attk_arry[-1]\n second = self.attk_arry[-2]\n third = self.attk_arry[-3]\n fourth = self.attk_arry[-4]\n sum_of_attk = first + second + third + fourth\n if sum_of_attk == 8:\n self.column_arry.append(10)\n self.row_arry.append(10)\n else:\n pass", "def getHits(self, timestamp):\n hit = 0\n for i in self.l:\n if i<=timestamp and i>timestamp-300:\n hit += 1\n return hit", "def nskip(self, date, time0=None):\n time0 = self.time0 if time0 is None else Time(time0, scale='utc')\n dt = Time(date, scale='utc') - time0\n nskip = int(round((dt / self.dtsample / self.setsize)\n .to(u.dimensionless_unscaled)))\n return nskip", "def numobs(self):\n return len(self.datelist)", "def test_bad_interval(self):\n # Intentionally set a small interval (3 min) to fail.\n interval = np.timedelta64(3, 'm')\n self.assertFalse(utils.check_timestamps(self.times, interval))", "def ntimebins(self, t0, t1):\n t0 = Time(t0, scale='utc')\n t1 = Time(t1, scale='utc')\n nt = ((t1-t0).to(u.s) / self.dtsample /\n (self.setsize)).to(u.dimensionless_unscaled).value\n return np.round(nt).astype(int)", "def firstMissingPositiveInteger(arr):\n missing_value = 1\n try:\n for i in range(len(arr)):\n if arr[i] > 0:\n if arr[i] == missing_value:\n missing_value += 1\n \n except IndexError as err:\n print('IndexError --> {0}'.format(err))\n raise\n except ValueError as err:\n print('ValueError --> {0}'.format(err))\n raise\n\n finally:\n return missing_value", "def 
count_num_empty_tiles_not_masked(subgrid):\n\n\tnum_empty_tiles_not_masked = 0\n\tfor tile in subgrid:\n\t\tif tile == MaskedTile.EMPTY:\n\t\t\tnum_empty_tiles_not_masked += 1\n\n\treturn num_empty_tiles_not_masked", "def get_valid_intervals(timestamps, sampling_rate, gap_proportion, min_valid_len):\n\n eps = 0.0000001\n # get rid of NaN elements\n timestamps = timestamps[~np.isnan(timestamps)]\n\n gap = np.diff(timestamps) > 1.0 / sampling_rate * gap_proportion\n\n #all true entries of gap represent gaps. Get the times bounding these intervals.\n gapind = np.asarray(np.where(gap))\n # The end of each valid interval are the indeces of the gaps and the final value\n valid_end = np.append(gapind, np.asarray(len(timestamps)-1))\n\n # the beginning of the gaps are the first element and gapind+1\n valid_start = np.insert(gapind + 1, 0, 0)\n\n valid_indices = np.vstack([valid_start, valid_end]).transpose()\n\n valid_times = timestamps[valid_indices]\n # adjust the times to deal with single valid samples\n valid_times[:,0] = valid_times[:,0] - eps\n valid_times[:,1] = valid_times[:,1] + eps\n\n valid_intervals = (valid_times[:,1] - valid_times[:,0]) > min_valid_len\n\n return valid_times[valid_intervals,:]", "def na_ratio(ts: TimeSeries) -> float:\n\n return ts.pd_dataframe().isnull().sum().mean() / len(ts)", "def missing_values(self):\n missing_values = self.data.isna().sum()\n if missing_values:\n fracture = missing_values / self.data.count()\n return f\"N={missing_values},{round(fracture, 2)}%\"\n else:\n return \"no missing values\"", "def gapRunCount(letters):\n uniqLetters = map(operator.itemgetter(0), groupby(letters))\n return uniqLetters.count(\"-\")", "def no_answer_count(self):\n return (self.first_entry_count + self.subsequent_entries_count\n - self.resolved_answer_count - self.active_answer_count)", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def test_timestamp_spacing_one_timestamp(times):\n assert_series_equal(\n time.spacing(times[[0]], times.freq),\n pd.Series(True, index=[times[0]])\n )", "def count_segments_naive(self, starts, ends, points):\r\n count = [0] * len(points)\r\n \r\n for i in range(len(points)):\r\n for j in range(len(starts)):\r\n if starts[j] <= points[i] <= ends[j]:\r\n count[i] += 1\r\n \r\n return count", "def count_leap_seconds(\n GPS_Time: np.ndarray | float,\n truncate: bool = True\n ):\n # get the valid leap seconds\n leaps = get_leap_seconds(truncate=truncate)\n # number of leap seconds prior to GPS_Time\n n_leaps = np.zeros_like(GPS_Time,dtype=np.float64)\n for i,leap in enumerate(leaps):\n count = np.count_nonzero(GPS_Time >= leap)\n if (count > 0):\n indices = np.nonzero(GPS_Time >= leap)\n n_leaps[indices] += 1.0\n # return the number of leap seconds for converting to UTC\n return n_leaps" ]
[ "0.65900165", "0.6369049", "0.6316393", "0.6256028", "0.62290597", "0.617701", "0.61646646", "0.6154375", "0.60366976", "0.6026818", "0.6009626", "0.5978013", "0.59768033", "0.5961701", "0.5955014", "0.58601505", "0.5835052", "0.58279943", "0.5825557", "0.5720708", "0.5682689", "0.5680005", "0.56660974", "0.56496996", "0.5629173", "0.5614725", "0.5607036", "0.5597966", "0.55813426", "0.5561822", "0.55487144", "0.553788", "0.5525586", "0.55243933", "0.55201644", "0.55191666", "0.54993397", "0.54993033", "0.54993033", "0.54947174", "0.5492886", "0.5488416", "0.54679024", "0.54663026", "0.5465023", "0.5461523", "0.5444472", "0.5433098", "0.54219186", "0.54204184", "0.5384408", "0.5383283", "0.53828084", "0.5381028", "0.5379814", "0.5377761", "0.53659177", "0.5351934", "0.535156", "0.5342603", "0.5316553", "0.53041625", "0.52997243", "0.5299437", "0.5295806", "0.52806544", "0.5280143", "0.5279827", "0.52786255", "0.52629095", "0.5248589", "0.52404016", "0.52365446", "0.52333534", "0.5231319", "0.52303636", "0.5226423", "0.5217533", "0.52138776", "0.52137387", "0.5197961", "0.51950383", "0.5186002", "0.5179573", "0.51644343", "0.51591885", "0.5155522", "0.5154149", "0.51536226", "0.5151261", "0.5147399", "0.51461154", "0.5139866", "0.5123197", "0.5121718", "0.51205903", "0.512031", "0.51179796", "0.5116861", "0.511659" ]
0.8279923
0
Add missing timestamps to weather data and interpolate to fill in the data; return df with missing times and weather data filled in
def add_missing_weather_data(df):
    full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H')
    sites = list(set(df.site_id))
    full_data_site_range = pd.DataFrame(itertools.product(sites, full_date_range), columns=['site_id', 'timestamp'])
    df_all_dates = full_data_site_range.merge(df, on=['site_id', 'timestamp'], how='left')
    df_all_dates = df_all_dates.groupby('site_id').apply(lambda group: group.interpolate(limit_direction='both'))
    return df_all_dates
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def auto_fillna(ts: TimeSeries,\n **interpolate_kwargs) -> TimeSeries:\n\n ts_temp = ts.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if 'limit_direction' not in interpolate_kwargs:\n interpolate_kwargs['limit_direction'] = 'both'\n interpolate_kwargs['inplace'] = True\n ts_temp.interpolate(**interpolate_kwargs)\n\n return TimeSeries.from_times_and_values(ts.time_index(), ts_temp.values)", "def fill_missing_data_points(data):\n return data.interpolate()", "def interpolate(df):\n for x in df.columns:\n if x == \"date\":\n continue\n df[x] = df[x].interpolate(method='linear', axis=0).ffill().bfill()\n return df", "def _auto_fill(series: TimeSeries, **interpolate_kwargs) -> TimeSeries:\n\n series_temp = series.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if \"limit_direction\" not in interpolate_kwargs:\n interpolate_kwargs[\"limit_direction\"] = \"both\"\n interpolate_kwargs[\"inplace\"] = True\n series_temp.interpolate(**interpolate_kwargs)\n return TimeSeries.from_dataframe(\n series_temp,\n freq=series.freq,\n static_covariates=series.static_covariates,\n hierarchy=series.hierarchy,\n )", "def fill_weather_forecast_columns(df):\n\n filled_df = df.copy()\n filled_df.loc['2018-01-01','temp_KC':'wind_north_SD'] = filled_df.loc['2018-01-02','temp_KC':'wind_north_SD'].values\n filled_df.loc['2018-02-06','temp_KC':'wind_north_SD'] = filled_df.loc['2018-02-05','temp_KC':'wind_north_SD'].values\n filled_df.loc['2019-02-05','temp_KC':'wind_north_SD'] = filled_df.loc['2019-02-04','temp_KC':'wind_north_SD'].values\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='ffill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='bfill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='ffill', limit=1)\n\n any_nans = filled_df.isna().sum(axis=0)\n \n if any_nans.sum(axis=0) != 0:\n print('The function did not convert all NaNs. Some NaNs still exist.')\n\n return filled_df", "def fill_test_weather_forecast_columns(df):\n\n filled_df = df.copy()\n filled_df.loc['2018-02-22','temp_KC':'wind_north_SD'] = filled_df.loc['2018-02-21','temp_KC':'wind_north_SD'].values\n filled_df.loc['2018-02-23','temp_KC':'wind_north_SD'] = filled_df.loc['2018-02-24','temp_KC':'wind_north_SD'].values\n # print(filled_df.loc['2018-02-21':'2018-02-24'])\n filled_df = filled_df.fillna(method='ffill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='bfill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='ffill', limit=1)\n\n any_nans = filled_df.isna().sum(axis=0)\n \n if any_nans.sum(axis=0) != 0:\n print('The function did not convert all NaNs. 
Some NaNs still exist.')\n\n return filled_df", "def build_full_temp(df_temperature):\n df_interp_full = build_missing(df_temperature)\n df_full = pd.concat([df_interp_full, df_temperature], ignore_index=True)\n df_full = df_full.sort_values(by=['datetime'])\n return df_full", "def clean_meteo_data(self, df):\n for col in df.columns:\n df[col] = df[col].str.replace(',', '.').astype(\"float\")\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n df=df.fillna(method='ffill')\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n# print(\"shape selected sensor data:\",df.shape)\n df=df.dropna()\n df=df.resample(\"10T\").mean()\n df=df.reset_index()\n df['dag']=df['datetime'].dt.day\n return df", "def ts_resample(self):\n try:\n ts_freq = pd.DataFrame(\n index=pd.date_range(self.ts_df.index[0], self.ts_df.index[len(self.ts_df) - 1], freq=self.freq),\n columns=['dummy'])\n except ValueError:\n self._uvts_cls_logger.exception(\"Exception occurred, possibly incompatible frequency!\")\n sys.exit(\"STOP\")\n\n if self.fill_method == 'ffill':\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n self.ts_df.y = self.ts_df.y.fillna(method='ffill')\n # if np.isnan ( self.ts_df.y ).any ():\n # self.ts_df.y = self.ts_df.y.fillna ( method='bfill' )\n else: # interp\n xp = np.linspace(0, self.ts_df.size, self.ts_df.size, endpoint=False)\n fp = self.ts_df['y']\n # join\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n # pick new points\n x = np.linspace(0, ts_freq.size, ts_freq.size, endpoint=False)\n x = x[self.ts_df['y'].isna()]\n print(x.size)\n print(x)\n\n # put the values\n self.ts_df.y[self.ts_df['y'].isna()] = np.interp(x, xp, fp)\n\n if np.isnan(self.ts_df.y).any():\n self._uvts_cls_logger.warning(\"Some NaN found, something went wrong, check the data!\")\n sys.exit(\"STOP\")\n\n self._uvts_cls_logger.info(\"Time series resampled at frequency: \" + str(self.ts_df.index.freq) +\n \". New shape of the data: \" + str(self.ts_df.shape))\n self._uvts_cls_logger.info(\"Using time series data of range: \" + str(min(self.ts_df.index)) + ' - ' + str(\n max(self.ts_df.index)) + \" and shape: \" + str(self.ts_df.shape))\n\n return self", "def smart_gas_nan_checker(smart, gas, weather, dwelling_id):\n\n print('Resampling smart, gas, weather')\n # For more resampling info see: https://pandas.pydata.org/pandas-docs/stable/api.html#id41\n # Makes missing gaps appear as NaN, these are the general raw dataframes to work with\n smart_10s = smart.resample('10s').mean()\n gas_h = gas.resample('H').mean()\n weather_10min = weather.resample('10min').mean()\n\n \"\"\"\n Create a dataframe with a 1 hour sample rate\n \"\"\"\n gas_h['gasPower'] = gas_h['gasMeter'].diff() # Calculate gasPower column\n gas_h['gasPower'][0] = gas_h['gasPower'][1] # Replace 1st entry (NaN) with 2nd entry\n\n smart_h = smart_10s.resample('H').mean() # Down sample smart\n weather_h = weather_10min.resample('H').mean() # Down sample weather\n\n # Combine gas, smart, weather\n df_hour = pd.merge(smart_h, gas_h, left_index=True, right_index=True)\n df_hour = pd.merge(df_hour, weather_h, left_index=True, right_index=True)\n\n \"\"\"\n Create smartmeter dataframe with a 10s sample rate\n \"\"\"\n gas_10s = gas_h.resample('10s').ffill() # Up sample gas to 10s\n # Calculate gasPower column, is this rhe right way? 
Or should we ffill it?\n # Currently this code makes it so there is one gasPower value per hour, we could ffill this also?\n gas_10s['gasPower'] = gas_10s['gasMeter'].diff()\n gas_10s['gasPower'][0] = gas_10s['gasPower'][1] # Replace 1st entry (NaN) with 2nd entry\n\n weather_10s = weather_10min.resample('10s').ffill() # forward fill because the raw data is the 10 minute mean\n\n # Combine gas, smart, weather\n df_10s = pd.merge(smart_10s, gas_10s, left_index=True, right_index=True)\n df_10s = pd.merge(df_10s, weather_10s, left_index=True, right_index=True)\n\n \"\"\"\n Do NaN analysis on the 10s and hour sample rate dataframes\n \"\"\"\n print('Length of combined df_10s: %s' % len(df_10s))\n print('df_nan_fig_10s')\n df_nan_fig_10s = plot_nans(df_10s, dwelling_id+' 10s sample rate')\n print('df_nan_table_10s')\n df_nan_table_10s = df_nan_checker(df_10s, 0)\n\n print('Length of combined df_hour: %s' % len(df_hour))\n print('df_nan_fig_hour')\n df_nan_fig_hour = plot_nans(df_hour, dwelling_id+' 1 hour sample rate')\n print('df_nan_table_hour')\n df_nan_table_hour = df_nan_checker(df_hour, 0)\n\n return df_10s, df_hour, df_nan_table_10s, df_nan_table_hour, df_nan_fig_hour, df_nan_fig_10s", "def interpolate_missing(y):\n if y.isna().any():\n y = y.interpolate(method='linear', limit_direction='both')\n return y", "def test_interpolate_values_1_hour_gap(self, forcing_processor):\n forcing_processor.data = {}\n forcing_processor.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, 9, 0, 0), 215.0),\n (datetime.datetime(2011, 9, 25, 10, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 11, 0, 0), 235.0),\n ]\n forcing_processor.interpolate_values('air_temperature', 1, 1)\n expected = (datetime.datetime(2011, 9, 25, 10, 0, 0), 225.0)\n assert forcing_processor.data['air_temperature'][1] == expected", "def _interpolate_meteorological_data(dset, data, rundate):\n rundate = datetime(rundate.year, rundate.month, rundate.day)\n for field, station in [(f, f[4:]) for f in data.keys() if f.startswith(\"met_\")]:\n log.debug(f\"Meteorological data available for station {station}\")\n\n met_time = data[field].pop(\"met_time\")\n flat_list = [item for sublist in met_time for item in sublist]\n met_time_float = np.array([(flat_list[i] - rundate).total_seconds() for i in range(0, len(flat_list))])\n met_time_unique, met_index = np.unique(met_time_float, return_index=True)\n\n diff = len(met_time_float) - len(met_time_unique)\n if diff > 0:\n log.dev(f\"Removed duplicate met data for station {station}\")\n log.dev(\"Do this for the actual obs data also!\")\n if len(met_time_unique) == 1:\n for met_type in data[field].keys():\n data[field][met_type] = np.repeat(data[field][met_type][0], dset.num_obs)\n continue\n\n # Extrapolation one month before/after\n # (this is overkill, most of these values will be removed later when taking the diagonal)\n min_time = min(met_time_unique) - 31 * 86400\n max_time = max(met_time_unique) + 31 * 86400\n met_time_unique = np.hstack((np.array(min_time), met_time_unique, np.array(max_time)))\n\n for met_type in data[field].keys():\n met_data_array = data[field][met_type]\n flat_list = [item for sublist in met_data_array for item in sublist]\n met_data_array = np.array([flat_list[i] for i in met_index])\n met_data_array = np.hstack((met_data_array[0], met_data_array, met_data_array[-1]))\n data[field][met_type] = interpolation.interpolate(\n met_time_unique, met_data_array, dset.obs_time, kind=\"cubic\"\n )\n\n return data", "def interpolate_timeseries(self, x, t, **kw):\n 
v, t_v = self.timeseries(x, rmnans=True)\n kw.update(dict(bounds_error=False))\n interpolant = sp.interpolate.interp1d(t_v, v, **kw)\n return interpolant(t)", "def pad(input_data):\n # source : https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array \n data = input_data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data)\n data[bad_indexes] = interpolated\n return data", "def getEPADailyData(dateint, dt_ind, month, epa_df, yr):\n\n try:\n start = dateint + dt_ind * 10000\n end = start + 10001\n dly_epa_df = epa_df[(epa_df.created >= start) & (epa_df.created < end)]\n dly_epa_df.reset_index(inplace=True, drop=True)\n\n new_df = pd.DataFrame(columns=['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'epa_pm25_value', 'raw_concentration', 'aqi', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code', 'created'])\n for sitenm in dly_epa_df.site_name.unique():\n indx_ct = 0\n site_df = dly_epa_df[dly_epa_df.site_name == sitenm]\n for i in site_df.created.unique():\n indx_ct += 1\n new_df = pd.concat([new_df,site_df.iloc[indx_ct - 1:indx_ct]],ignore_index=True)\n\n if i != site_df.created.max(): # Don't interpolate the last record\n tmp_df = site_df.iloc[indx_ct - 1:indx_ct][['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code']]\n for j in range(1,6):\n new_dt = i + j * 10\n tmp_df['created'] = int(new_dt)\n tmp_df['epa_pm25_value'] = np.nan\n tmp_df['raw_concentration'] = np.nan\n tmp_df['aqi'] = np.nan\n new_df = pd.concat([new_df,tmp_df],ignore_index=True)\n\n # Convert aqi to numerica for so that it gets interpolated\n new_df[['aqi']] = new_df[['aqi']].replace(\"nan\", np.nan, regex=True)\n new_df[['aqi']] = new_df[['aqi']].apply(pd.to_numeric)\n\n new_df = new_df.interpolate(method='linear', limit_direction='forward', axis=0)\n\n int_epa_df = new_df[(new_df.created >= start) & (new_df.created < (end - 1))]\n int_epa_df.reset_index(inplace=True, drop=True)\n \n # Write to S3\n s3 = s3fs.S3FileSystem()\n myopen = s3.open\n write('midscapstone-whos-polluting-my-air/EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr), int_epa_df, compression='GZIP', open_with=myopen)\n s3_resource = boto3.resource('s3')\n s3_resource.Object('midscapstone-whos-polluting-my-air', 'EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr)).Acl().put(ACL='public-read')\n\n except Exception as e:\n print(\"*** EXCEPTION IN GET EPA DAILY DATA *** {}\".format(e))\n return int_epa_df", "def fillna(ts: TimeSeries, fill: float = 0) -> TimeSeries:\n\n return TimeSeries.from_times_and_values(ts.time_index(), ts.pd_dataframe().fillna(value=fill))", "def test_interpolate_values_2_hour_gap(self, forcing_processor):\n forcing_processor.data = {}\n forcing_processor.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, 9, 0, 0), 215.0),\n (datetime.datetime(2011, 9, 25, 10, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 11, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 12, 0, 0), 230.0),\n ]\n forcing_processor.interpolate_values('air_temperature', 1, 2)\n expected = (datetime.datetime(2011, 9, 25, 10, 0, 0), 220.0)\n assert forcing_processor.data['air_temperature'][1] == expected\n expected = (datetime.datetime(2011, 9, 25, 11, 0, 0), 225.0)\n assert forcing_processor.data['air_temperature'][2] == expected", "def 
interpolate_series(t,y,start,stop):\n \n # daily timestamsp\n numdays = (stop-start).days\n D = [start + datetime.timedelta(days=x) for x in range(0,numdays)]\n T = [time.mktime(date.timetuple()) for date in D]\n \n # interpolated variable\n Y = np.interp(T, t, y)\n \n return (D, Y)", "def check_and_interpolate_nans(df):\n nan_count = df.isna().sum().sum()\n if nan_count > 0:\n df.interpolate(method='linear', inplace=True)\n return df", "def interpolate(self, **kwargs): # noqa: PR01\n return DataFrameDefault.register(pandas.DataFrame.interpolate)(self, **kwargs)", "def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather", "def interpolate(self, new_times):\n\n # check if the times are to be grabbed from a ping_data object\n if isinstance(new_times, ping_data):\n new_times = new_times.ping_time\n\n # and interpolate\n self.data[:] = np.interp(self.ping_time, new_times,\n self.data, left=np.nan, right=np.nan)\n self.ping_time = new_times.copy()", "def clean_station_data(station_df):\n # TODO implement data preparation here\n # Fix the datetime field\n\n # Cast to numeric fields where necessary\n\n # Interpolate missing data", "def fill_nan(x):\n (n_rows, wdw) = x.shape\n new_x = np.zeros((n_rows,wdw)); new_x[:] = np.nan\n for i in range(n_rows):\n indMissing = np.where(np.isnan(x[i,:]))[0]\n l = len(x[i,indMissing]) #number of MVs\n if l < 4*wdw/5: #20% available values otherwise discarded\n new_x[i,:] = x[i,:]\n if l > 0 and indMissing[0] == 0: #missing value at index 0 \n c = 0\n while c + 1 < len(indMissing) and indMissing[c+1] == indMissing[c] + 1:\n c += 1\n new_x[i,:c+1] = x[i,c+1] #first nans replaced by first non nan value\n indMissing = np.where(np.isnan(new_x[i,:]))[0]\n l = len(new_x[i,indMissing])\n if l > 0 and indMissing[0] > 0:\n new_x[i,:] = interpolate1d(new_x[i,:]) #interpolate intermediate nans\n ind = np.where(~np.isnan(new_x).all(axis=1))[0]\n new_x = new_x[ind] #remove NaNs \n \n return new_x, ind", "def fill_missing_values(\n series: TimeSeries, fill: Union[str, float] = \"auto\", **interpolate_kwargs\n) -> TimeSeries:\n raise_if_not(\n isinstance(fill, str) or isinstance(fill, float),\n \"`fill` should either be a string or a float\",\n logger,\n )\n raise_if(\n isinstance(fill, str) and fill != \"auto\",\n \"invalid string for `fill`: can only be set to 'auto'\",\n logger,\n )\n\n if fill == \"auto\":\n return _auto_fill(series, **interpolate_kwargs)\n return _const_fill(series, fill)", "def check_nan(wseries: pd.Series) -> pd.Series:\n\n if len(wseries[pd.Series([\n (type(val) == str or isnan(val)) for val in wseries\n ], index=wseries.index)]) == 0:\n return wseries # nothing to change\n\n # ensure that all are either float or nan\n def _float_or_nan(ent):\n \"\"\"\n Force values to be either a float or nan first\n \"\"\"\n try:\n return float(ent)\n except ValueError:\n return float('nan')\n\n wseries = pd.Series(\n [_float_or_nan(val) for val in wseries], index=wseries.index,\n name=wseries.name\n )\n\n # continue with interpolation or extrapolation if needed\n inds = where(\n pd.Series([\n (isinstance(val, str) or isnan(val)) for val in wseries\n ], index=wseries.index)\n )[0] # locate the position of the problematic readings\n for ind in inds:\n try:\n wseries[ind] = interpolate_with_s(\n 
wseries.index[ind], wseries.index[ind-1],\n wseries.index[ind+1],\n wseries[ind-1], wseries[ind+1]\n )\n if isnan(wseries[ind]): # interpolation does not work\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-2],\n wseries.index[ind-1],\n wseries[ind-2], wseries[ind-1]\n )\n except IndexError: # extrapolation\n try:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-2],\n wseries.index[ind-1],\n wseries[ind-2], wseries[ind-1]\n )\n except IndexError:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind+2],\n wseries.index[ind+1],\n wseries[ind+2], wseries[ind+1]\n )\n return wseries\n\n return wseries", "def streaming_weather_data(**kwargs):\n df = weather_data(['San Francisco'])\n df['time'] = [pd.Timestamp.now()]\n return df.set_index('time')", "def test_parse_weather_two_missing_time(self):\n data = copy.deepcopy(self.weather_two)\n\n # Remove a time entry.\n del data['data'][0]['time']\n\n actual = timeseries.parse_weather(data)\n\n # We'll have a NaN in the Index.\n self.assertTrue(actual.index.isna().any())", "def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan", "def interpolate_data(data: pd.DataFrame) -> pd.DataFrame:\n if data.empty:\n return data\n data_arg_sort = np.argsort(data.values, axis=0)\n data_sorted = np.take_along_axis(data.values, data_arg_sort, axis=0)\n data_index = data.index.values[data_arg_sort]\n\n rows, cols = data_sorted.shape\n\n num_na = data.isna().sum(axis=0)\n not_na = rows - num_na\n\n # create a linspace for each column, each with #rows entries that span from 0 to #non-missing values in that column\n float_index = (np.tile(np.linspace(0, 1, rows), (cols, 1)) * (not_na.values[:, np.newaxis] - 1)).T\n index = np.floor(float_index).astype(int)\n decimal = float_index % 1\n\n index_min = np.minimum(index + 1, not_na.values[np.newaxis, :] - 1)\n # take the weighted average of the two neighboring values, based on the decimal\n # slightly simplified expression (1 - dec) * sorted_data[index] + decimal * sorted_data[index + 1]\n new_values = (1 - decimal) * np.take_along_axis(data_sorted, index, axis=0) +\\\n decimal * np.take_along_axis(data_sorted, index_min, axis=0)\n\n # now the index of the new values needs to be reconstructed, since each column was sorted differently\n result = []\n # reconstruct one column at a time\n for column_index, column_name in enumerate(data.columns):\n series_index = np.empty((rows,))\n series_index[:] = np.nan\n\n index_mapping = ddict(lambda: (np.nan, np.inf))\n for i, (index_val, float_val) in enumerate(zip(index[:, column_index], float_index[:, column_index])):\n decimal = float_val - index_val\n if index_mapping[index_val][1] > decimal:\n index_mapping[index_val] = i, decimal\n\n if index_mapping[index_val + 1][1] > 1 - decimal:\n index_mapping[index_val + 1] = i, 1 - decimal\n\n # the index that was added last needs to be removed\n index_mapping.pop(index_val + 1)\n\n for key, (idx, _) in index_mapping.items():\n series_index[idx] = key\n\n series_index[np.isnan(series_index)] = np.arange(np.nanmax(series_index) + 1, rows, 1)\n s_ind = data_index[:, column_index][series_index.astype(int)]\n result.append(pd.Series(new_values[:, column_index], index=s_ind, name=column_name))\n\n return pd.concat(result, axis=1, sort=False)", "def 
zero_end_interpolation(df: pd.DataFrame):\n end = df.index[-1]\n empty_df = pd.DataFrame(index=np.arange(0, end + 1, 1))\n res = pd.concat([df, empty_df], axis=1)\n res = res.fillna(method='ffill')\n res = res.fillna(method='bfill')\n return res", "def fill_nan(array):\n idx = np.arange(array.shape[0])\n good = np.where(np.isfinite(array))\n interp = interpolate.interp1d(idx[good], array[good], bounds_error=False)\n return np.where(np.isfinite(array), array, interp(idx))", "def fill_missing(self):\n df = self.df\n # Filling with default values\n logger.debug(\"Filling from distributions...\")\n for field in HeatStrokeDataFiller.default_map or field in HeatStrokeDataFiller.positive_default:\n if field not in df.columns:\n logger.warning(\"(%s) missing from data-frame columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to default: %s\" % (field, HeatStrokeDataFiller.default_map[field]))\n default_value = HeatStrokeDataFiller.default_map[field]\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=False)\n how_many_to_fill = np.sum(where)\n if field in HeatStrokeDataFiller.positive_default:\n # Use default positive dietributions\n distribution = HeatStrokeDataFiller.positive_default[field]\n df[field].loc[where] = distribution(how_many_to_fill)\n else:\n logger.debug(\"Using default %s for field: %s\" % (default_value, field))\n # Use default values\n df[field].loc[where] = np.array([default_value] * how_many_to_fill)\n\n # Filling with Zeros\n logger.debug(\"Fillling with zeros...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_zero:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to 0\" % field)\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = np.zeros(how_many_to_fill)\n\n # Filling in columns with the average from the rest of the column\n logger.debug(\"Filling with agerages...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_average:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from data-frame columns\" % field)\n continue\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n data = df[field][np.invert(where)]\n mean = np.mean(data)\n std = np.std(data)\n if mean == np.nan or std == np.nan:\n mean, std = (0, 0)\n logger.debug(\"Setting missing in \\\"%s\\\" with: %.3f +/- %.3f\" % (field, mean, std))\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = mean + std * np.random.random(how_many_to_fill)\n\n fields_not_modified = set(df.columns) - set(HeatStrokeDataFiller.default_map.keys()) - HeatStrokeDataFiller.fields_to_fill_with_zero - HeatStrokeDataFiller.fields_to_fill_with_zero\n logger.debug(\"Fields not modified: %s\" % fields_not_modified.__str__())\n return df", "def interpolate_missing_data(times, fluxes, cadences=None):\n first_time = times[0]\n\n if cadences is not None:\n # Median time between cadences\n dt = np.median(np.diff(times) / np.diff(cadences))\n cadence_indices = cadences - cadences[0]\n else:\n # Find typical time between cadences:\n dt = np.median(np.diff(times))\n # Approximate the patchy grid of integer cadence indices,\n # i.e.: (0, 1, 3, 4, 5, 8, ...)\n cadence_indices = np.rint((times - first_time)/dt)\n\n # Find missing cadence indices if that grid were complete\n expected_cadence_indices = 
set(np.arange(cadence_indices.min(),\n cadence_indices.max()))\n missing_cadence_indices = expected_cadence_indices.difference(set(cadence_indices))\n # Convert the missing cadences to times\n missing_times = first_time + np.array(list(missing_cadence_indices))*dt\n\n # Interpolate to find fluxes at missing times\n interp_fluxes = np.interp(missing_times, times, fluxes)\n\n # Combine the interpolated and input times, fluxes\n interpolated_fluxes = np.concatenate([fluxes, interp_fluxes])\n interpolated_times = np.concatenate([times, missing_times])\n\n # Sort the times, fluxes, so that you can compute the ACF on them:\n sort_by_time = np.argsort(interpolated_times)\n interpolated_fluxes = interpolated_fluxes[sort_by_time]\n interpolated_times = interpolated_times[sort_by_time]\n return interpolated_times, interpolated_fluxes", "def interp_ts(cls, df_in, ts_col, interp_ts):\n\n interp=interp1d(df_in['timestamp'],\n df_in[ts_col],\n kind='previous',\n fill_value='extrapolate')\n df_interp = pd.DataFrame({\n 'date' : [datetime.utcfromtimestamp(t) for t in interp_ts],\n 'timestamp' : interp_ts,\n ts_col : interp(interp_ts)\n })\n\n return df_interp", "def interpolate(self, column_name):\n self.check_for_column(column_name)\n\n start_date = min(self.data.index)\n end_date = max(self.data.index)\n date_range = pd.date_range(start_date, end_date, freq='H')\n self.data = self.data.reindex(date_range)\n column = self.data[column_name]\n column = column.interpolate()\n self.data[column_name] = column", "def interpolateTime(ser, time):\n def findTime(a_list, func):\n if len(a_list) == 0:\n return np.nan\n else:\n return func(a_list)\n def findValue(time):\n if np.isnan(time):\n return np.nan\n else:\n return ser[time]\n #\n time_lb = findTime([t for t in ser.index if t <= time], max)\n time_ub = findTime([t for t in ser.index if t >= time], min)\n value_lb = findValue(time_lb)\n value_ub = findValue(time_ub)\n if np.isnan(value_lb):\n return value_ub\n if np.isnan(value_ub):\n return value_lb\n if time_ub == time_lb:\n return value_ub\n frac = (time - time_lb)/(time_ub - time_lb)\n return (1 - frac)*value_lb + frac*value_ub", "def point_interp_ts(df, time_col, x_col, y_col, data_col, point_shp, point_site_col, from_crs, to_crs=None, interp_fun='cubic', agg_ts_fun=None, period=None, digits=2):\n\n #### Read in points\n if isinstance(point_shp, str) & isinstance(point_site_col, str):\n points = read_file(point_shp)[[point_site_col, 'geometry']]\n to_crs1 = points.crs\n elif isinstance(point_shp, GeoDataFrame) & isinstance(point_site_col, str):\n points = point_shp[[point_site_col, 'geometry']]\n to_crs1 = points.crs\n else:\n raise ValueError('point_shp must be a str path to a shapefile or a GeoDataFrame and point_site_col must be a str.')\n\n #### Create the grids\n df1 = df.copy()\n\n #### Resample the time series data\n if agg_ts_fun is not None:\n df1a = df1.set_index(time_col)\n if agg_ts_fun == 'sum':\n df2 = df1a.groupby([TimeGrouper(period), Grouper(y_col), Grouper(x_col)])[data_col].sum().reset_index()\n elif agg_ts_fun == 'mean':\n df2 = df1a.groupby([TimeGrouper(period), Grouper(y_col), Grouper(x_col)])[data_col].mean().reset_index()\n else:\n raise ValueError(\"agg_ts_fun should be either 'sum' or 'mean'.\")\n time = df2[time_col].unique()\n else:\n df2 = df1\n\n time = df2[time_col].sort_values().unique()\n\n #### Convert input data to crs of points shp and create input xy\n data1 = df2.loc[df2[time_col] == time[0]]\n from_crs1 = convert_crs(from_crs, pass_str=True)\n\n if to_crs is not 
None:\n to_crs1 = convert_crs(to_crs, pass_str=True)\n points = points.to_crs(to_crs1)\n geometry = [Point(xy) for xy in zip(data1[x_col], data1[y_col])]\n gpd = GeoDataFrame(data1.index, geometry=geometry, crs=from_crs1)\n gpd1 = gpd.to_crs(crs=to_crs1)\n x = gpd1.geometry.apply(lambda p: p.x).round(digits).values\n y = gpd1.geometry.apply(lambda p: p.y).round(digits).values\n\n xy = column_stack((x, y))\n\n #### Prepare the x and y of the points geodataframe output\n x_int = points.geometry.apply(lambda p: p.x).round(digits).values\n y_int = points.geometry.apply(lambda p: p.y).round(digits).values\n sites = points[point_site_col]\n\n xy_int = column_stack((x_int, y_int))\n\n #### Create new df\n sites_ar = tile(sites, len(time))\n time_ar = repeat(time, len(xy_int))\n x_ar = tile(x_int, len(time))\n y_ar = tile(y_int, len(time))\n new_df = DataFrame({'site': sites_ar, 'time': time_ar, 'x': x_ar, 'y': y_ar, data_col: repeat(0, len(time) * len(xy_int))})\n\n new_lst = []\n for t in to_datetime(time):\n set1 = df2.loc[df2[time_col] == t, data_col]\n new_z = griddata(xy, set1.values, xy_int, method=interp_fun).round(digits)\n new_z[new_z < 0] = 0\n new_lst.extend(new_z.tolist())\n# print(t)\n new_df.loc[:, data_col] = new_lst\n\n #### Export results\n return(new_df[new_df[data_col].notnull()])", "def ts_transform(data, ts_col, date_col=None, fill_zeros=False, fill_nan=False, fix_trend=False, lag_differentiation=1, log_transform=False, ma_transform=False, ma_periods=2,\n accumulate_transform=False, accumulation_periods=None):\n df = data.copy(deep=True)\n\n if fill_zeros is True:\n df[df[ts_col] == 0] = np.nan\n if fill_nan is True:\n if df[ts_col].isna().sum() != 0:\n print(df[ts_col].isna().sum(), ' values will be interpolated \\n', 'Of a total of ', df[ts_col].shape[0])\n df[ts_col] = df[ts_col].interpolate()\n else:\n print('No NaN Values Found')\n\n if log_transform is True:\n if any(df[ts_col] == 0):\n print('Warning, ', ts_col, ' contains ', (df[ts_col] == 0).sum(), ' Zeros, Transformation will be done with log(1+ Value)')\n df[ts_col] = np.log(df[ts_col] + 1)\n else:\n df[ts_col] = np.log(df[ts_col])\n\n if fix_trend is True:\n df[ts_col] = df[ts_col] - df[ts_col].shift(lag_differentiation)\n\n if ma_transform is True:\n df[ts_col] = df[ts_col].rolling(ma_periods).mean()\n if fill_nan is True:\n df = df.dropna(subset=[ts_col])\n\n if accumulate_transform is True:\n if accumulation_periods is None:\n print('Accumulation periods are not defined!')\n df[ts_col] = df[ts_col].rolling(accumulation_periods).sum()\n\n if date_col is not None:\n df.set_index([date_col], inplace=True)\n print('Index set as ', type(df.index))\n\n return df[ts_col]", "def interpolate(timepoint_defined, signal, interp_type, TR):\n\n timepoint_defined = np.array(timepoint_defined)\n\n true_inds = np.where(timepoint_defined == True)[0]\n false_inds = np.where(timepoint_defined == False)[0]\n\n\n signal_copy = np.array(signal)\n\n if interp_type == 'linear':\n\n #Still need to handle beginning/end cases\n\n for temp_timepoint in false_inds:\n\n\n #past_timepoint = true_inds[np.sort(np.where(true_inds < temp_timepoint)[0])[-1]]\n #future_timepoint = true_inds[np.sort(np.where(true_inds > temp_timepoint)[0])[0]]\n\n\n #Be sure there is at least one future timepoint and one past timepoint.\n #If there isn't, then grab either two past or two future timepoints and use those\n #for interpolation. If there aren't even two total past + future timepoints, then\n #just set the output to 0. 
Could also set the output to be unadjusted, but this\n #is a way to make the issue more obvious.\n temp_past_timepoint = np.sort(np.where(true_inds < temp_timepoint)[0])\n temp_future_timepoint = np.sort(np.where(true_inds > temp_timepoint)[0])\n\n #If we don't have enough data to interpolate/extrapolate\n if len(temp_past_timepoint) + len(temp_future_timepoint) < 2:\n\n signal_copy[temp_timepoint] = 0\n\n #If we do have enough data to interpolate/extrapolate\n else:\n\n if len(temp_past_timepoint) == 0:\n past_timepoint = true_inds[temp_future_timepoint[1]]\n else:\n past_timepoint = true_inds[temp_past_timepoint[-1]]\n\n if len(temp_future_timepoint) == 0:\n future_timepoint = true_inds[temp_past_timepoint[-2]]\n else:\n future_timepoint = true_inds[temp_future_timepoint[0]]\n\n #Find the appopriate past/future values\n past_value = signal_copy[int(past_timepoint)]\n future_value = signal_copy[int(future_timepoint)]\n\n #Use the interp1d function for interpolation\n interp_object = interp.interp1d([past_timepoint, future_timepoint], [past_value, future_value], bounds_error=False, fill_value='extrapolate')\n signal_copy[temp_timepoint] = interp_object(temp_timepoint).item(0)\n\n return signal_copy\n\n\n #For cubic spline interpolation, instead of taking the past/future timepoint\n #we will just take the closest 5 timepoints. If there aren't 5 timepoints, we will\n #set the output to 0\n if interp_type == 'cubic_spline':\n\n sorted_good = np.sort(signal_copy[true_inds])\n min_bound = sorted_good[0]\n max_bound = sorted_good[-1]\n\n #Continue if there are at least 5 good inds\n true_inds_needed = 5\n if len(true_inds) >= true_inds_needed:\n\n for temp_timepoint in false_inds:\n\n closest_inds = true_inds[np.argsort(np.absolute(true_inds - temp_timepoint))]\n closest_vals = signal_copy[closest_inds.astype(int)]\n interp_object = interp.interp1d(closest_inds, closest_vals, kind = 'cubic', bounds_error=False, fill_value='extrapolate')\n signal_copy[temp_timepoint.astype(int)] = interp_object(temp_timepoint).item(0)\n\n min_bound_exceded = np.where(signal_copy < min_bound)[0]\n if len(min_bound_exceded) > 0:\n\n signal_copy[min_bound_exceded] = min_bound\n\n max_bound_exceded = np.where(signal_copy > max_bound)[0]\n if len(max_bound_exceded) > 0:\n\n signal_copy[max_bound_exceded] = max_bound\n\n #If there aren't enough good timepoints, then set the bad timepoints = 0\n else:\n\n signal_copy[false_inds.astype(int)] = 0\n\n\n return signal_copy\n\n\n if interp_type == 'spectral':\n\n signal_copy = spectral_interpolation(timepoint_defined, signal_copy, TR)\n\n return signal_copy", "def _const_fill(series: TimeSeries, fill: float = 0) -> TimeSeries:\n\n return TimeSeries.from_times_and_values(\n series.time_index,\n series.pd_dataframe().fillna(value=fill),\n freq=series.freq,\n columns=series.columns,\n static_covariates=series.static_covariates,\n hierarchy=series.hierarchy,\n )", "def interpolate_wx_from_gps(harbor_data):\n # print(harbor_data[\"gps_altitude\"])\n # print(harbor_data[\"gps_times\"])\n\n # Lists to hold the interpolated data\n harbor_data[\"wx_temp_up\"] = []\n harbor_data[\"wx_alt_up\"] = []\n harbor_data[\"wx_temp_down\"] = []\n harbor_data[\"wx_alt_down\"] = []\n \n altitude_peak = 0 # Holds peak altitude of balloon\n altitude_peak_time = 0 # Holds time balloon peaks\n\n # Finds peak altitude and peak altitude time\n for count, altitude in enumerate(harbor_data[\"gps_altitude\"]):\n if altitude > altitude_peak:\n altitude_peak = altitude\n else:\n altitude_peak_time = 
harbor_data[\"gps_times\"][count]\n break\n\n # Populates lists of temperatures up and temperatures down\n for count, time in enumerate(harbor_data[\"wx_times\"]):\n if time < altitude_peak_time:\n harbor_data[\"wx_temp_up\"].append(harbor_data[\"wx_temperatures\"][count])\n elif time > harbor_data[\"gps_times\"][len(harbor_data[\"gps_times\"])-1]:\n break\n else:\n harbor_data[\"wx_temp_down\"].append(harbor_data[\"wx_temperatures\"][count])\n\n # Populates lists of altitudes up and altitudes down\n harbor_data[\"wx_alt_up\"] = np.linspace(harbor_data[\"gps_altitude\"][0], altitude_peak, len(harbor_data[\"wx_temp_up\"]))\n harbor_data[\"wx_alt_down\"] = np.linspace(altitude_peak, harbor_data[\"gps_altitude\"][len(harbor_data[\"gps_altitude\"])-1], len(harbor_data[\"wx_temp_down\"]))", "def test_patch_data_1_hour_gap(self, forcing_processor):\n forcing_processor.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, 9, 0, 0), 215.0),\n (datetime.datetime(2011, 9, 25, 10, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 11, 0, 0), 235.0),\n ]\n forcing_processor.interpolate_values = Mock(name='interpolate_values')\n with patch('bloomcast.utils.log') as mock_log:\n forcing_processor.patch_data('air_temperature')\n expected = [\n (('air_temperature data patched for 2011-09-25 10:00:00',),),\n (('1 air_temperature data values patched; '\n 'see debug log on disk for details',),),\n ]\n assert mock_log.debug.call_args_list == expected\n forcing_processor.interpolate_values.assert_called_once_with(\n 'air_temperature', 1, 1)", "def fill_weather_nans(column, df, agg_func='median'):\n # aggregate data to obtain median value for a particular site, month, and day\n agg_weather_df = pd.DataFrame(df.groupby(['site_id', 'month', 'day'])[column].agg(agg_func))\n\n # check for missing values in the aggregated data\n if agg_weather_df[column].isnull().any():\n # fill NaNs using interpolation\n agg_df = agg_weather_df[column].interpolate(limit_direction='both',\n inplace=True)\n agg_weather_df.update(agg_df, overwrite=False)\n\n # set index before updating input DataFrame\n df.set_index(['site_id', 'month', 'day'], inplace=True)\n df.update(agg_weather_df, overwrite=False)\n\n # reset index\n df.reset_index(inplace=True)", "def fill_nan(A):\n\tinds = np.arange(A.shape[0])\n\tgood = np.where(np.isfinite(A))\n\tA[np.isnan(A)] = np.interp(inds[np.isnan(A)], inds[good], A[good])\n\treturn A", "def impute(sd, imputation_values, method='ffill', mask=True):\n ts = pd.read_csv(os.path.join(sd, 'timeseries.csv'))\n ts = ts.set_index('CHARTTIME')\n\n variables = list(imputation_values.keys())\n\n if mask:\n # Create an imputation mask\n ts_mask = ts.mask(ts.notna(), 1)\n ts_mask = ts_mask.mask(ts.isna(), 0)\n ts_mask = ts_mask.drop(['LOS_HOURS', 'TARGET_COARSE', 'TARGET_FINE'],\n axis=1)\n ts_mask = ts_mask.add_prefix('mask_')\n\n # Make sure that the first row contains values such that we can\n # do a forward fill impute\n for var in variables:\n if math.isnan(ts[var].iloc[0]):\n if var == 'WEIGHT' or var == 'HEIGHT':\n ts[var].iloc[0] = imputation_values[var] \\\n [str(int(round(ts['GESTATIONAL_AGE_DAYS'].iloc[0] / 7)\n ))]\n else:\n ts[var].iloc[0] = imputation_values[var]\n\n\n if method == 'ffill':\n # Impute through forward filling\n ts = ts.fillna(method='ffill')\n elif method == 'zeros':\n # Impute through filling with zeros\n ts = ts.fillna(value=0)\n else:\n raise ValueError(f'{method} must be one of \"ffill\" or \"zeros\"')\n\n if mask:\n # Concatenate the timeseries with the imputation mask\n ts = 
pd.concat([ts, ts_mask], axis=1)\n\n ts.to_csv(os.path.join(sd, 'timeseries_imputed.csv'))", "def interpolate(self, column_name):\n sources.Source.interpolate(self, column_name)\n\n start_date = self.scenario_day\n end_date = self.scenario_day + datetime.timedelta(hours=23)\n\n date_range = pd.date_range(start_date, end_date, freq='H')\n self.dayahead_data = self.dayahead_data.reindex(date_range)\n column = self.dayahead_data[column_name]\n column = column.interpolate(limit_direction='both')\n self.dayahead_data[column_name] = column", "def resample(df):\n\n # Reseting index\n df.sort_values(by=[\"timestamp\"], ascending=True).reset_index(inplace=True)\n df_grp_hr = (\n df.groupby(\"sensor_id\")\n .resample(\"H\", on=\"timestamp\")\n .agg({\"temperature\": \"mean\", \"humidity\": \"mean\"})\n .reset_index()\n )\n\n return df_grp_hr", "def fill_gaps(\n t, order=1, extrapolate=0, frame_max=None, hdim_1_max=None, hdim_2_max=None\n):\n\n from scipy.interpolate import InterpolatedUnivariateSpline\n\n logging.debug(\"start filling gaps\")\n\n t_list = [] # empty list to store interpolated DataFrames\n\n # group by cell number and perform process for each cell individually:\n t_grouped = t.groupby(\"cell\")\n for cell, track in t_grouped:\n # Setup interpolator from existing points (of order given as keyword)\n frame_in = track[\"frame\"].values\n hdim_1_in = track[\"hdim_1\"].values\n hdim_2_in = track[\"hdim_2\"].values\n s_x = InterpolatedUnivariateSpline(frame_in, hdim_1_in, k=order)\n s_y = InterpolatedUnivariateSpline(frame_in, hdim_2_in, k=order)\n\n # Create new index filling in gaps and possibly extrapolating:\n index_min = min(frame_in) - extrapolate\n index_min = max(index_min, 0)\n index_max = max(frame_in) + extrapolate\n index_max = min(index_max, frame_max)\n new_index = range(index_min, index_max + 1) # +1 here to include last value\n track = track.reindex(new_index)\n\n # Interpolate to extended index:\n frame_out = new_index\n hdim_1_out = s_x(frame_out)\n hdim_2_out = s_y(frame_out)\n\n # Replace fields in data frame with\n track[\"frame\"] = new_index\n track[\"hdim_1\"] = hdim_1_out\n track[\"hdim_2\"] = hdim_2_out\n track[\"cell\"] = cell\n\n # Append DataFrame to list of DataFrames\n t_list.append(track)\n # Concatenate interpolated trajectories into one DataFrame:\n t_out = pd.concat(t_list)\n # Restrict output trajectories to input data in time and space:\n t_out = t_out.loc[\n (t_out[\"hdim_1\"] < hdim_1_max)\n & (t_out[\"hdim_2\"] < hdim_2_max)\n & (t_out[\"hdim_1\"] > 0)\n & (t_out[\"hdim_2\"] > 0)\n ]\n t_out = t_out.reset_index(drop=True)\n return t_out", "def artificial_data(dt1, dt2, minutes=1):\n\n def fxweek(x):\n return 2 - x * (1 - x)\n\n def sat(x):\n return 2 * x + 2\n\n data = []\n dt = datetime.timedelta(minutes=minutes)\n while dt1 < dt2:\n if dt1.weekday() == 6:\n dt1 += dt\n continue\n if minutes <= 120 and not (dt1.hour >= 8 and dt1.hour <= 18):\n dt1 += dt\n continue\n x = (dt1.hour - 8) / 10\n if dt1.weekday() == 5:\n y = sat(x)\n else:\n y = fxweek(x)\n data.append({'time': dt1, 'y': y})\n dt1 += dt\n df = pandas.DataFrame(data)\n df['y'] += numpy.random.randn(df.shape[0]) * 0.1\n df['time'] = pandas.DatetimeIndex(df['time'])\n return df", "def test_patch_data_2_hour_gap(self, forcing_processor):\n forcing_processor.data = {}\n forcing_processor.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, 9, 0, 0), 215.0),\n (datetime.datetime(2011, 9, 25, 10, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 11, 0, 0), None),\n 
(datetime.datetime(2011, 9, 25, 12, 0, 0), 230.0),\n ]\n forcing_processor.interpolate_values = Mock()\n with patch('bloomcast.utils.log') as mock_log:\n forcing_processor.patch_data('air_temperature')\n expected = [\n (('air_temperature data patched for 2011-09-25 10:00:00',),),\n (('air_temperature data patched for 2011-09-25 11:00:00',),),\n (('2 air_temperature data values patched; '\n 'see debug log on disk for details',),),\n ]\n assert mock_log.debug.call_args_list == expected\n forcing_processor.interpolate_values.assert_called_once_with(\n 'air_temperature', 1, 2)", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def _impute_border_hours(self, temp):\n temp = temp.dropna()\n last_day = temp.index.max()\n first_day = temp.index.min()\n for hour in range(1, 4):\n new_hour = first_day - pd.DateOffset(hours=hour)\n # Get closest hour\n best = new_hour + pd.DateOffset(days=1)\n # Fill in\n temp[new_hour] = temp[best]\n\n # Now at end of time series\n new_hour = last_day + pd.DateOffset(hours=hour)\n best = new_hour - pd.DateOffset(days=1)\n temp[new_hour] = temp[best]\n return 
temp.sort_index()", "def test_parse_weather_two_missing_temperature(self):\n data = copy.deepcopy(self.weather_two)\n\n # Remove a temperature entry.\n del data['data'][1]['TowerDryBulbTemp']\n\n actual = timeseries.parse_weather(data)\n\n # We'll have a NaN.\n self.assertTrue(actual['temperature'].isna().any())", "def interpolate(self, var, time, lat, lon):\n\n # Get the nearest four points in space\n # Check to see if lat/lons are 2d or 1d\n if len(self['lat'].shape) == 2:\n closey, closex, distances = self.nearest_points(lat, lon, npt=4)\n # Distances in km\n# distances = np.array([self.haversine(\n# (self['lat'][y,x].values, self['lon'][y,x].values),\n# (lat, lon)) for y,x in \n# zip(list(closey), list(closex))])\n else:\n closen = self.nearest_points(lat, lon, npt=4)\n closey = closen\n closex = closen\n # Distances in km\n distances = np.array([self.haversine(\n (self['lat'][n].values, self['lon'][n].values),\n (lat, lon)) for n in list(closen)])\n # Check for exact match (within some tolerance)\n spaceweights = np.zeros(distances.shape)\n if (distances < 1.0).sum() > 0:\n spaceweights[distances.argmin()] = 1\n else:\n # Here, inverse distance weighting (for simplicity)\n spaceweights = 1.0 / distances\n spaceweights /= spaceweights.sum()\n # Get weights in time\n #time64 = np.datetime64(time)\n #all the valid times in the ensemble\n valids = self['validtime'].values\n timeweights = np.zeros(valids.shape)\n # Check if we are outside the valid time range\n if (time < valids[0]) or (time > valids[-1]):\n print(\"Interpolation is outside of time range in state!\")\n return None\n # Find where we are in this list\n #index after the time of the observation\n lastdex = (valids >= time).argmax()\n # If we match a particular time value, then\n # this is just an identity\n if valids[lastdex] == time:\n # Just make a one at this time\n timeweights[lastdex] = 1\n else:\n # Linear interpolation\n #often going to be 6 hours, subtracts datetime objects I think\n diff = (valids[lastdex] - valids[lastdex-1])\n #print(valids[lastdex], valids[lastdex-1], diff)\n #often going to be 21600 seconds\n totsec = diff.seconds\n #totsec = np.abs(diff / np.timedelta64(1, 's'))\n #ST\n #calculate time difference between time after and time of observation\n #the abs will make this positive definite, which is okay since\n #the difference will always be negative\n thisdiff = abs(time - valids[lastdex])\n #thissec = np.abs(thisdiff / np.timedelta64(1,'s'))\n thissec = thisdiff.seconds\n # Put in appropriate weights\n #ST switched the -1 between the two lines to match up with the positive-\n #definite thisdiff\n timeweights[lastdex-1] = float(thissec) / totsec\n timeweights[lastdex] = 1.0 - (float(thissec)/totsec)\n # Now that we have the weights, do the interpolation\n #ST an ntimes x 4 x nens array\n interp = self.variables[var].values[:,closey,closex,:]\n # Do a dot product with the time weights\n # And with the space weights\n if len(interp.shape) == 3:\n interp = (timeweights[:,None,None] * interp).sum(axis=0)\n else:\n interp = (timeweights[:,None,None,None] * interp).sum(axis=0)\n \n if len(interp.shape) == 3:\n #ST Changed 2nd : to None\n interp = (spaceweights[:,None,None] * interp).sum(axis=1)\n else:\n interp = (spaceweights[:,None] * interp).sum(axis=0)\n # Return estimate from all ensemble members\n return interp", "def grid_interp_ts(df, time_col, x_col, y_col, data_col, grid_res, from_crs=None, to_crs=2193, interp_fun='cubic', agg_ts_fun=None, period=None, digits=2):\n\n #### Create the grids\n df1 = 
df.copy()\n\n #### Resample the time series data\n if agg_ts_fun is not None:\n df1a = df1.set_index(time_col)\n if agg_ts_fun == 'sum':\n df2 = df1a.groupby([TimeGrouper(period), Grouper(y_col), Grouper(x_col)])[data_col].sum().reset_index()\n elif agg_ts_fun == 'mean':\n df2 = df1a.groupby([TimeGrouper(period), Grouper(y_col), Grouper(x_col)])[data_col].mean().reset_index()\n else:\n raise ValueError(\"agg_ts_fun should be either 'sum' or 'mean'.\")\n time = df2[time_col].unique()\n else:\n df2 = df1\n\n time = df2[time_col].sort_values().unique()\n\n if from_crs is None:\n x = df2.loc[df2[time_col] == time[0], x_col].values\n y = df2.loc[df2[time_col] == time[0], y_col].values\n else:\n data1 = df2.loc[df2[time_col] == time[0]]\n from_crs1 = convert_crs(from_crs, pass_str=True)\n to_crs1 = convert_crs(to_crs, pass_str=True)\n geometry = [Point(xy) for xy in zip(data1[x_col], data1[y_col])]\n gpd = GeoDataFrame(data1.index, geometry=geometry, crs=from_crs1)\n gpd1 = gpd.to_crs(crs=to_crs1)\n x = gpd1.geometry.apply(lambda p: p.x).round(digits).values\n y = gpd1.geometry.apply(lambda p: p.y).round(digits).values\n\n xy = column_stack((x, y))\n\n max_x = x.max()\n min_x = x.min()\n\n max_y = y.max()\n min_y = y.min()\n\n new_x = arange(min_x, max_x, grid_res)\n new_y = arange(min_y, max_y, grid_res)\n x_int, y_int = meshgrid(new_x, new_y)\n\n #### Create new df\n x_int2 = x_int.flatten()\n y_int2 = y_int.flatten()\n xy_int = column_stack((x_int2, y_int2))\n time_df = repeat(time, len(x_int2))\n x_df = tile(x_int2, len(time))\n y_df = tile(y_int2, len(time))\n new_df = DataFrame({'time': time_df, 'x': x_df, 'y': y_df, data_col: repeat(0, len(time) * len(x_int2))})\n\n new_lst = []\n for t in to_datetime(time):\n set1 = df2.loc[df2[time_col] == t, data_col]\n# index = new_df[new_df['time'] == t].index\n new_z = griddata(xy, set1.values, xy_int, method=interp_fun).round(digits)\n new_z[new_z < 0] = 0\n new_lst.extend(new_z.tolist())\n# print(t)\n new_df.loc[:, data_col] = new_lst\n\n #### Export results\n return(new_df[new_df[data_col].notnull()])", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def fill_missing(self) -> None:\n\n self.fill_missing_rows()\n self.fill_missing_source_parameters()\n return", "def get_temp():\n epts = [\"cage_coldPlate_temp\", \"cage_pressure\"]\n # t_earlier_aug = '2019-10-02T00:00'\n # t_later_aug = datetime.utcnow().isoformat()\n t_earlier_aug = '2019-09-27T13:00'\n t_later_aug = '2019-09-28T19:49'\n dfs = pandas_db_query(epts, t_earlier_aug, t_later_aug)\n print(dfs[epts[0]].tail())\n\n exit()\n\n xv = dfs[epts[0]][\"timestamp\"]\n yv = dfs[epts[0]][epts[0]]\n plt.plot(xv, yv, '-b')\n plt.ylabel(epts[0], ha='right', y=1)\n\n p1a = plt.gca().twinx()\n xv = dfs[epts[1]][\"timestamp\"]\n yv = dfs[epts[1]][epts[1]]\n p1a.set_ylabel(epts[1], color='r', ha='right', y=1)\n p1a.tick_params('y', colors='r')\n p1a.semilogy(xv, yv, '-r')\n\n plt.gcf().autofmt_xdate()\n plt.tight_layout()\n plt.show()", "def interpolate(f):\n @funnel\n def fill_missing(data, 
return_model=False, **kwargs):\n impute_kwargs = kwargs.pop('impute_kwargs', {})\n\n if impute_kwargs:\n model = impute_kwargs.pop('model', eval(defaults['impute']['model']))\n imputed_data, model = apply_sklearn_model(model, data, return_model=True, **impute_kwargs)\n data = pd.DataFrame(data=imputed_data, index=data.index, columns=data.columns)\n else:\n model = None\n\n if kwargs:\n kwargs = update_dict(defaults['interpolate'], kwargs, from_config=True)\n data = data.interpolate(**kwargs)\n\n if return_model:\n return data, {'model': model, 'args': [], 'kwargs': kwargs}\n else:\n return data\n\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n interp_kwargs = kwargs.pop('interp_kwargs', {})\n return f(fill_missing(data, *args, **interp_kwargs), **kwargs)\n\n return wrapped", "def fix_annotation(csv_data, time_offset = 0):\n # step 1: eliminate rows with same starttime and endtime\n csv_data = csv_data[csv_data.STARTTIME != csv_data.ENDTIME]\n\n # step 2: elminate nan in starttime and endtime\n csv_data = csv_data.dropna(axis=0,subset=[st_col,et_col])\n\n # step 3: fill \"blank\" cells\n csv_data = csv_data.reset_index(drop=True)\n csv_data[puff_col] = csv_data[puff_col].fillna(value='no-puff')\n csv_data[activity_col] = csv_data[activity_col].fillna(value='no-activity')\n csv_data[post_col] = csv_data[post_col].fillna(method='backfill')\n csv_data[post_col] = csv_data[post_col].fillna(method='ffill')\n csv_data[smoke_col] = csv_data[smoke_col].fillna(value='not-smoking')\n \n # step 4: fill 'no-activity' cells whose length is less than 3s with backfill\n csv_data = csv_data.reset_index(drop=True)\n filt = csv_data.apply(lambda x: x[et_col] - x[st_col] <= timedelta(seconds=2) and x[activity_col] == 'no-activity', axis=1)\n csv_data.ix[csv_data[filt].index, activity_col] = csv_data.ix[csv_data[filt].index+1, activity_col].values\n csv_data[activity_col] = csv_data[activity_col].fillna(value='no-activity')\n # step 5: change isolated single \"smoking\" cells into proper label\n bshift_smoke = csv_data[smoke_col].shift(1).fillna(method='backfill')\n fshift_smoke = csv_data[smoke_col].shift(-1).fillna(method='ffill')\n filt = np.logical_and(csv_data[smoke_col] != bshift_smoke, csv_data[smoke_col] != fshift_smoke)\n # print csv_data[filt]\n # ind = csv_data[filt].index\n filt1 = np.logical_and(filt, csv_data[smoke_col] == 'smoking')\n csv_data.ix[filt1, smoke_col] = 'not-smoking'\n filt = np.logical_and(csv_data[smoke_col] != bshift_smoke, csv_data[smoke_col] != fshift_smoke)\n filt2 = np.logical_and(np.logical_and(filt, csv_data[smoke_col] == 'not-smoking'), csv_data.apply(lambda x: x[et_col] - x[st_col] < timedelta(minutes=1),axis=1))\n csv_data.ix[filt2, smoke_col] = 'smoking'\n # print csv_data.iloc[ind]\n\n # step 6: turn smoking sequence without puffs into \"not smoking\"\n st_filt = np.logical_and(csv_data[smoke_col] != csv_data[smoke_col].shift(1), csv_data[smoke_col] == 'smoking')\n et_filt = np.logical_and(csv_data[smoke_col] != csv_data[smoke_col].shift(-1), csv_data[smoke_col] == 'smoking')\n cig_st = csv_data[st_filt]\n cig_et = csv_data[et_filt]\n for i in range(0,len(cig_st.index)):\n puff_flag = csv_data[cig_st.index[i]:cig_et.index[i]+1][puff_col] == 'no-puff'\n if puff_flag.all():\n csv_data[cig_st.index[i]:cig_et.index[i]+1][smoke_col] = 'not-smoking'\n\n # step 7: add offset to starttime and endtime\n # print csv_data.head()\n csv_data[et_col] = csv_data[et_col] + timedelta(seconds=time_offset)\n csv_data[st_col] = csv_data[st_col] + 
timedelta(seconds=time_offset)\n # print csv_data.head()\n\n # step 8: reindex from 0\n csv_data = csv_data.reset_index(drop=True)\n return csv_data", "def interpolate_nans(self):\n\n signal = self.signal\n\n # check for more than one nan in row\n for i in range(len(signal)-1) :\n if np.isnan(signal[i]) and np.isnan(signal[i+1]) :\n raise Exception('There are two nans in a row ask moritz what to do !')\n\n if np.isnan(signal[0]) :\n np.signal[0] = signal[1]\n if np.isnan(signal[-1]) :\n signal[-1] = signal[-2]\n\n for i in range(1,len(signal)-1) :\n if np.isnan(signal[i]):\n signal[i] = (signal[i-1] + signal[i+1])/2", "def _fillna_meta_cols(self):\n for col_name, fill_value in self._fillna.items():\n if col_name in self._hybrid_meta.columns:\n self._hybrid_meta[col_name].fillna(fill_value, inplace=True)\n else:\n self.__warn_missing_col(col_name, action='fill')\n\n self._hybrid_meta[self.__solar_rpi_n].fillna(-1, inplace=True)\n self._hybrid_meta[self.__wind_rpi_n].fillna(-1, inplace=True)", "def insert_fill(times, data, fillval=numpy.nan, tol=1.5, absolute=None, doTimes=True):\n times = numpy.asanyarray(times)\n data = numpy.asanyarray(data)\n assert(len(times.shape) == 1)\n if len(times) == data.shape[0]:\n timeaxis = 0\n else:\n matches = numpy.nonzero(numpy.asanyarray(data.shape) == len(times))[0]\n if len(matches) != 1:\n raise ValueError(\n \"Unable to uniquely match shape of data to count of times.\")\n timeaxis = matches[0]\n fillshape = numpy.delete(data.shape, timeaxis) #shape of data w/o time axis\n if numpy.shape(fillval) != fillshape:\n if numpy.shape(fillval) == ():\n fillval = numpy.tile(fillval, fillshape)\n else:\n raise ValueError(\"Cannot match shape of fill to shape of data\")\n diff = numpy.diff(times)\n if hasattr(diff[0], 'seconds'): #datetime\n diff = numpy.vectorize(lambda x: x.days * 86400.0 + x.seconds +\n x.microseconds / 1.0e6)(diff)\n if absolute is not None:\n absolute = absolute.days * 86400.0 + absolute.seconds + \\\n absolute.microseconds / 1.0e6\n if absolute is None:\n idx = numpy.nonzero(diff > (numpy.median(diff) * tol))[0] + 1\n else:\n idx = numpy.nonzero(diff > absolute)[0] + 1\n data = numpy.insert(data, idx, numpy.repeat(fillval, len(idx)),\n axis=timeaxis) #NOOP if no fill\n if not doTimes:\n return data\n try:\n filltimes = (times[idx] + times[idx - 1]) / 2.0\n except TypeError:\n filltimes = times[idx - 1] + numpy.vectorize(lambda x: datetime.timedelta(seconds=x / 2.0))(diff[idx - 1])\n times = numpy.insert(times, idx, filltimes)\n return times, data", "def time_continuity_gaps(data):\n indexes = data.dropna(how='all').index\n resolution = tf._get_data_resolution(indexes)\n # print(resolution)\n continuity = pd.DataFrame({'Date From': indexes.values.flatten()[:-1],\n 'Date To': indexes.values.flatten()[1:]})\n continuity['Days Lost'] = (continuity['Date To'] - continuity['Date From']) / pd.Timedelta('1 days')\n\n # Remove indexes where no days are lost before returning\n filtered = continuity[continuity['Days Lost'] != (tf._get_data_resolution(indexes) / pd.Timedelta('1 days'))]\n\n # where only one timestamp is lost replace 0 by resolution lost.\n filtered['Date From'] = filtered['Date From'] + resolution\n filtered['Date To'] = filtered['Date To'] - resolution\n filtered['Days Lost'] = (filtered['Date To'] - filtered['Date From']) / pd.Timedelta('1 days')\n filtered.replace(0, (tf._get_data_resolution(indexes) / pd.Timedelta('1 days')), inplace=True)\n\n return filtered", "def _handle_missing_vars(netcdf_start_date, netcdf_end_date, 
tmp_dir):\n hrs_range = arrow.Arrow.range(\n \"hour\", netcdf_start_date.shift(days=-1), netcdf_end_date.shift(hours=+23)\n )\n missing_var_hrs = {}\n for netcdf_hr in hrs_range:\n nemo_date = f\"y{netcdf_hr.year}m{netcdf_hr.month:02d}d{netcdf_hr.day:02d}\"\n nemo_hr_ds_path = tmp_dir / f\"gemlam_{nemo_date}_{netcdf_hr.hour:03d}.nc\"\n with xarray.open_dataset(nemo_hr_ds_path) as ds:\n missing_vars = ds.attrs.get(\"missing_variables\")\n if missing_vars is None:\n for var, missing_hrs in missing_var_hrs.copy().items():\n if len(missing_hrs) <= 4:\n _interpolate_intra_day_missing_var_hrs(var, missing_hrs)\n del missing_var_hrs[var]\n else:\n _interpolate_inter_day_missing_var_hrs(var, missing_hrs)\n del missing_var_hrs[var]\n else:\n for var in missing_vars.split(\", \"):\n try:\n missing_var_hrs[var].append(\n {\"hr\": netcdf_hr, \"ds_path\": nemo_hr_ds_path}\n )\n except KeyError:\n missing_var_hrs[var] = [\n {\"hr\": netcdf_hr, \"ds_path\": nemo_hr_ds_path}\n ]\n for missing_hr in missing_var_hrs.get(\"solar\", []).copy():\n # Special handling for 1feb07 to 23feb07 period in which there are no solar radiation values\n in_feb07_solar_gap = missing_hr[\"hr\"].is_between(\n arrow.get(\"2007-02-01\"), arrow.get(\"2007-02-24\"), bounds=\"[)\"\n )\n if not in_feb07_solar_gap:\n break\n # Calculate solar radiation from cloud fraction and time of day sun angle\n solar = _calc_solar_from_clouds_and_angle(\n missing_hr[\"hr\"], missing_hr[\"ds_path\"]\n )\n solar = solar.astype(\"float32\", casting=\"same_kind\")\n solar.name = \"solar\"\n solar.attrs[\"level\"] = \"surface\"\n solar.attrs[\"long_name\"] = \"Downward Short-Wave Radiation Flux\"\n solar.attrs[\"standard_name\"] = \"net_downward_shortwave_flux_in_air\"\n solar.attrs[\"units\"] = \"W/m^2\"\n with xarray.open_dataset(missing_hr[\"ds_path\"]) as ds:\n ds_w_solar = ds.copy(deep=True).assign(solar=solar)\n missing_vars = ds_w_solar.attrs[\"missing_variables\"].split(\", \")\n missing_vars.remove(\"solar\")\n if missing_vars:\n ds_w_solar.attrs[\"missing_variables\"] = \", \".join(missing_vars)\n else:\n del ds_w_solar.attrs[\"missing_variables\"]\n _write_netcdf_file(ds_w_solar, missing_hr[\"ds_path\"])\n missing_var_hrs[\"solar\"].remove(missing_hr)\n else:\n if \"solar\" in missing_var_hrs and not missing_var_hrs[\"solar\"]:\n del missing_var_hrs[\"solar\"]\n if missing_var_hrs:\n raise ValueError(f\"missing variables at end of date range: {missing_var_hrs}\")", "def get_weather_data(lat='40.761440',lng='-73.981806'):\r\n key ='********************************'\r\n x = pd.DataFrame()\r\n unix_now = int((dt.datetime.now()- dt.datetime(1970,1,1)).total_seconds())\r\n for time in range(unix_now-86400, unix_now+604800, 86400):\r\n rsp = rq.get('https://api.darksky.net/forecast/{}/{},{},{}'.format(key, lat, lng, time))\r\n rsp_json = json.loads(rsp.text)\r\n row = json_normalize(rsp_json[\"daily\"]['data'])\r\n x = x.append(row)\r\n \r\n x = x[['icon','apparentTemperatureHigh','apparentTemperatureLow','cloudCover','humidity','precipProbability',\r\n 'pressure','visibility','windBearing','windGust','windSpeed']].reset_index(drop=True)\r\n return x", "def transform(self, y=None):\n forecast_dates = self.X[[\"dt_time\", \"month_day\"]]\n weather_avg = pd.read_csv(\"../data/weather_averages.csv\")\n weather_fcst = weather_avg[\n [\"DATE\", \"DLY-TMAX-NORMAL\", \"DLY-PRCP-50PCTL\", \"DLY-SNOW-50PCTL\"]\n ]\n weather_fcst[\"DATE\"] = pd.to_datetime(\n weather_fcst[\"DATE\"].astype(\"str\"), format=\"%Y%m%d\", errors=\"ignore\"\n )\n 
weather_fcst[\"month_day\"] = weather_fcst[\"DATE\"].dt.strftime(\"%m/%d\")\n weather_fcst = weather_fcst[\n [\"month_day\", \"DLY-PRCP-50PCTL\", \"DLY-TMAX-NORMAL\", \"DLY-SNOW-50PCTL\"]\n ].rename(\n columns={\n \"DLY-PRCP-50PCTL\": \"precip\",\n \"DLY-TMAX-NORMAL\": \"temp_max\",\n \"DLY-SNOW-50PCTL\": \"snow\",\n }\n )\n weather_fcst[\"snow\"] = 0.0\n weather_fcst = forecast_dates.join(\n weather_fcst.set_index(\"month_day\"), on=\"month_day\"\n )\n near_term_weather = seattle_weather_fcst()\n\n for i in range(len(near_term_weather)):\n weather_fcst[\"temp_max\"][\n weather_fcst[\"dt_time\"] == near_term_weather[\"date\"][i]\n ] = near_term_weather[\"temp_max\"][i]\n if near_term_weather[\"precip_bool\"][i] == 0:\n weather_fcst[\"precip\"][\n weather_fcst[\"dt_time\"] == near_term_weather[\"date\"][0]\n ] = 0\n\n self.X[[\"precip\", \"temp_max\", \"snow\"]] = weather_fcst[\n [\"precip\", \"temp_max\", \"snow\"]\n ]\n return self.X.drop(\n columns=[\n \"dt_time\",\n \"year\",\n \"month\",\n \"day\",\n \"day_of_week\",\n \"month_day\",\n \"month_weekday\",\n \"spec_day\",\n ]\n )", "def FillNA(df, method: str = 'ffill', window: int = 10):\n method = str(method).replace(\" \", \"_\")\n\n if method == 'zero':\n return fill_zero(df)\n\n elif method == 'ffill':\n return fill_forward(df)\n\n elif method == 'mean':\n return fill_mean(df)\n\n elif method == 'median':\n return fill_median(df)\n\n elif method == 'rolling_mean':\n return rolling_mean(df, window=window)\n\n elif method == 'rolling_mean_24':\n return rolling_mean(df, window=24)\n\n elif method == 'ffill_mean_biased':\n return biased_ffill(df)\n\n elif method == 'fake_date':\n return fake_date_fill(df, back_method='slice')\n\n elif method in df_interpolate_full:\n df = df.interpolate(method=method, order=5).fillna(method='bfill')\n if df.isnull().values.any():\n df = fill_forward(df)\n return df\n\n elif method == 'IterativeImputer':\n cols = df.columns\n indx = df.index\n try:\n from sklearn.experimental import enable_iterative_imputer # noqa\n except Exception:\n pass\n from sklearn.impute import IterativeImputer\n\n df = IterativeImputer(random_state=0, max_iter=100).fit_transform(df)\n if not isinstance(df, pd.DataFrame):\n df = pd.DataFrame(df)\n df.index = indx\n df.columns = cols\n return df\n\n elif method == 'IterativeImputerExtraTrees':\n cols = df.columns\n indx = df.index\n try:\n from sklearn.experimental import enable_iterative_imputer # noqa\n except Exception:\n pass\n from sklearn.ensemble import ExtraTreesRegressor\n from sklearn.impute import IterativeImputer\n\n df = IterativeImputer(\n ExtraTreesRegressor(n_estimators=10, random_state=0),\n random_state=0,\n max_iter=100,\n ).fit_transform(df)\n if not isinstance(df, pd.DataFrame):\n df = pd.DataFrame(df)\n df.index = indx\n df.columns = cols\n return df\n\n elif method == 'KNNImputer':\n cols = df.columns\n indx = df.index\n from sklearn.impute import KNNImputer\n\n df = KNNImputer(n_neighbors=5).fit_transform(df)\n if not isinstance(df, pd.DataFrame):\n df = pd.DataFrame(df)\n df.index = indx\n df.columns = cols\n return df\n\n elif method is None or method == 'None':\n return df\n\n else:\n print(f\"FillNA method `{str(method)}` not known, returning original\")\n return df", "def interpolate_na(self, method: str = \"nearest\", **kwargs):\n ds_out = xr.Dataset(attrs=self._obj.attrs)\n for var in self.vars:\n ds_out[var] = self._obj[var].raster.interpolate_na(method=method, **kwargs)\n return ds_out", "def make_dataset_for_time_series(date_key):\n by_date_key = 
time_series_data[time_series_data['date_key'] == date_key]\n by_date_key.sort_values('datetime', inplace=True)\n return ColumnDataSource(by_date_key), ColumnDataSource(by_date_key.interpolate('slinear'))", "def temp_series(smhi_data):\n consumable_data = {\n \"station\": smhi_data[\"station\"][\"name\"],\n \"temp\": [],\n \"from\": smhi_data[\"value\"][0][\"date\"],\n \"to\": smhi_data[\"value\"][-1][\"date\"]\n }\n for temp_post in smhi_data[\"value\"]:\n consumable_data[\"temp\"].append(float(temp_post[\"value\"]))\n return consumable_data", "def get_env_data(inp_path):\n rain1_str = []\n rain2_str = []\n tide_str = []\n with open(inp_path, 'r') as tmp_file:\n lines = tmp_file.readlines()\n for i, l in enumerate(lines):\n if l.startswith(\"[TIMESERIES]\"): # find time series section\n start = i + 3\n for i, l in enumerate(lines[start:]):\n if l.startswith('Rain1'):\n rain1_str.append(l)\n if l.startswith('Rain2'):\n rain2_str.append(l)\n if l.startswith('Tide1'):\n tide_str.append(l)\n\n rain1_data = []\n rain1_time = []\n rain2_data = []\n rain2_time = []\n tide_data = []\n tide_time = []\n for i in rain1_str:\n rain1_data.append(i.split(' ')[3].rstrip())\n rain1_time.append(i.split(' ')[1] + \" \" + i.split(' ')[2])\n\n for i in rain2_str:\n rain2_data.append(i.split(' ')[3].rstrip())\n rain2_time.append(i.split(' ')[1] + \" \" + i.split(' ')[2])\n\n for i in tide_str:\n tide_data.append(i.split(' ')[3].rstrip())\n tide_time.append(i.split(' ')[1] + \" \" + i.split(' ')[2])\n\n rain1_df = pd.DataFrame([rain1_time, rain1_data]).transpose()\n rain1_df.columns = ['datetime1', 'rain1']\n rain1_df['datetime1'] = pd.to_datetime(rain1_df['datetime1'], infer_datetime_format=True)\n rain1_df.set_index(pd.DatetimeIndex(rain1_df['datetime1']), inplace=True)\n rain1_df['rain1'] = rain1_df['rain1'].astype('float')\n rain1_df = rain1_df.resample('H').sum()\n\n rain2_df = pd.DataFrame([rain2_time, rain2_data]).transpose()\n rain2_df.columns = ['datetime2', 'rain2']\n rain2_df['datetime2'] = pd.to_datetime(rain2_df['datetime2'], infer_datetime_format=True)\n rain2_df.set_index(pd.DatetimeIndex(rain2_df['datetime2']), inplace=True)\n rain2_df['rain2'] = rain2_df['rain2'].astype('float')\n rain2_df = rain2_df.resample('H').sum()\n\n tide_df = pd.DataFrame([tide_time, tide_data], dtype='float64').transpose()\n tide_df.columns = ['datetime', 'tide']\n tide_df['datetime'] = pd.to_datetime(tide_df['datetime'], infer_datetime_format=True)\n tide_df.set_index(pd.DatetimeIndex(tide_df['datetime']), inplace=True)\n tide_df['tide'] = tide_df['tide'].astype('float')\n\n df = pd.concat([rain1_df['rain1'], rain2_df['rain2'], tide_df['tide']], axis=1)\n df[['rain1', 'rain2']].fillna(0, inplace=True)\n df.reset_index(inplace=True)\n\n return df", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n 
temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n 
overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n 
name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def prepare_for_influxdb(df):\n df = df.drop(columns=\"landkreis\", errors=\"ignore\") # prevent name collision in get_ags()\n df = get_ags(df)\n df[\"time\"] = df.apply(lambda x: 1000000000*int(datetime.timestamp((pd.to_datetime(x[\"timestamp\"])))), 1)\n df[\"measurement\"] = \"hystreet\"\n df[\"origin\"] = \"https://hystreet.com\"\n df = df.rename(columns={\n 'station_id': '_id',\n 'pedestrians_count': 'pedestrian_count',\n 'state': 'bundesland'\n })\n df['ags'] = pd.to_numeric(df['ags'])\n # import pdb; pdb.set_trace()\n return df", "def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m", "def Data_formatting(dz, y, timestep, TZ):\r\n \r\n #reindex data by datetime\r\n dz.index = pd.to_datetime(dz['DATE'])\r\n \r\n #Isolate temperature data\r\n dz = dz[['TMP']]\r\n \r\n #Delete data mistake\r\n dz = dz[dz['TMP'] != \"+9999,9\"]\r\n \r\n #Format data\r\n dz['TMP'] = dz['TMP'].str.replace(',', '.')\r\n dz['TMP'] = pd.to_numeric(dz['TMP'], errors='coerce')\r\n \r\n #Delete NaN data\r\n dz = dz.dropna()\r\n \r\n #Convert temperature\r\n dz['TMP'] = dz['TMP'] / 10\r\n dz['TMP'] = dz['TMP'] * (9/5) + 32\r\n \r\n #Convert datetime index utc to specified timezone\r\n dz.index = dz.index.tz_localize(pytz.utc).tz_convert(pytz.timezone(str(TZ))).strftime(\"%Y-%m-%d %H:%M:%S\")\r\n dz.index = pd.to_datetime(dz.index)\r\n \r\n #Resample data by average on timestep\r\n dz = dz.resample(rule = str(timestep)).mean()\r\n \r\n #Define the first date of the instance year\r\n fdy = dt.datetime.strptime(\"01/01/\"+str(y)+\" 00:00\", '%m/%d/%Y %H:%M')\r\n #Convert first date of the year to timezone\r\n fdy = Date_calibration(fdy, 0, TZ)\r\n \r\n #If we collect the date from the current year we limit the collect from 2days before now\r\n \r\n #Define the datetime 2 days before now\r\n dbeyest = dt.datetime.now(tz=pytz.timezone(str(TZ))) - dt.timedelta(days=2)\r\n \r\n #If the instance year is the current year\r\n if(y == dbeyest.year):\r\n #We limit the collect 2 days before now\r\n ldy = dt.datetime.strptime(str(dbeyest.month)+\"/\"+str(dbeyest.day)+\"/\"+str(y)+\" 23:59\", '%m/%d/%Y %H:%M')\r\n else:\r\n #Else, we collect the full year\r\n ldy = dt.datetime.strptime(\"12/31/\"+str(y)+\" 23:59\", '%m/%d/%Y %H:%M')\r\n \r\n #Convert the last date of 
the year to specified timezone\r\n ldy = Date_calibration(ldy, 0, TZ)\r\n \r\n #Set up dataframe for the specified datetime index and timestep\r\n ph = pd.DataFrame(index=pd.DatetimeIndex(start=fdy, end=ldy, freq=str(timestep)))\r\n \r\n #Past original data temperature in the time fitted dataframe (with the datetimeindex position)\r\n ph['TMP'] = dz['TMP']\r\n \r\n #Calculate the quality of the instance data\r\n nb_nan = ph['TMP'].isnull().sum()\r\n qual = (1 - (nb_nan) / len(ph)) * 100\r\n \r\n return dz, qual", "def get_weather_timeseries_2017():\n weather_df = pd.read_csv(\"src/kumpula-weather-2017.csv\")\n\n # -1 value in columns \"Precipitation amount (mm)\" and \"Snow depth (cm)\" mean\n # that there was no absolutely no rain or snow that day, whereas 0 can mean\n # a little of either. Let's convert the -1 values to 0 to make the dataset\n # more logical to read.\n weather_df.loc[weather_df[\"Precipitation amount (mm)\"] == -1,\n \"Precipitation amount (mm)\"] = 0\n weather_df.loc[weather_df[\"Snow depth (cm)\"] == -1, \"Snow depth (cm)\"] = 0\n\n # Create datetime index\n weather_df[\"Month\"] = weather_df[\"m\"]\n weather_df[\"Day\"] = weather_df[\"d\"]\n weather_df[\"Date\"] = pd.to_datetime(weather_df[[\"Year\", \"Month\", \"Day\"]])\n\n # Reindex dataset\n weather_df = weather_df.set_index(\"Date\")\n\n # Drop redundant columns\n weather_df.drop([\"Time\", \"Time zone\", \"m\", \"d\", \"Year\",\n \"Month\", \"Day\"], axis=\"columns\", inplace=True)\n\n return weather_df", "def concat_zone_data(thermal_data):\n concat_data = pd.concat(thermal_data.values()).sort_index()\n filter_columns = [\"zone_temperature\" not in col for col in concat_data.columns]\n return concat_data[concat_data.columns[filter_columns]]", "def missing_reg(self):\n keys = []\n values = []\n count = [0] * 24\n\n for hour in self.data_file.buckets:\n for i in range(len(self.data_file.buckets[hour])):\n data_pt = self.data_file.buckets[hour][i]\n if data_pt['type'] == 'slow':\n time_before = self.data_file.buckets[hour][i - 1]['timestamp']\n time_slow = self.data_file.buckets[hour][i]['timestamp']\n if i != len(self.data_file.buckets[hour]) - 1:\n time_after = self.data_file.buckets[hour][i + 1]['timestamp']\n missing_reg_interval(keys, values, time_before, time_after, hour)\n else:\n missing_reg_interval(keys, values, time_before, time_slow, hour)\n if (time_slow - time_before) / float(Config.BOUNDARY) > 1:\n count[hour] += round((time_slow - time_before) / float(Config.BOUNDARY))\n missing_regular = dict(zip(keys, values))\n\n logger.info(f\"missing regular due to slow updates per hour: {count}\")\n logger.info(f\"missing regular due to slow updates: {missing_regular}\")\n logger.info(f\"total missing regular due to slow updates: {sum(count)}\")\n Config.ANALYSIS.write(\"\\n\")\n return missing_regular", "def interpolate_ephemeris(self):\n #Compute the offsets into the lookup tables\n startemiss, stopemiss = self.get_emissivity_offsets()\n hourslice, starttime = self.get_hour_offsets()\n latslice = self.get_lat_offsets()\n \n #Compute the start and stop dates\n startdata = self.extract_season(self.startseason,startemiss,\n hourslice, latslice)\n stopdata = self.extract_season(self.stopseason,startemiss,\n hourslice, latslice)\n # Interpolate Season\n seasons = [self.startseason, self.stopseason]\n season_f = compute_interpolation_function(seasons, [startdata, stopdata], 'linear')\n data = season_f(self.season)\n #Interpolate time\n self.data = self.interpolatehour(hourslice, starttime, data)", "def 
interpolate_na(\n self, method: str = \"nearest\", extrapolate: bool = False, **kwargs\n ):\n dim0 = self.dim0\n kwargs.update(dict(method=method, extrapolate=extrapolate))\n if dim0:\n interp_data = np.empty(self._obj.shape, dtype=self._obj.dtype)\n for i, (_, sub_xds) in enumerate(self._obj.groupby(dim0)):\n interp_data[i, ...] = self._interpolate_na(\n sub_xds.load().data, **kwargs\n )\n else:\n interp_data = self._interpolate_na(self._obj.load().data, **kwargs)\n interp_array = xr.DataArray(\n name=self._obj.name,\n dims=self._obj.dims,\n coords=self._obj.coords,\n data=interp_data,\n attrs=self._obj.attrs,\n )\n interp_array.raster.set_nodata(self.nodata)\n interp_array.raster.set_crs(self.crs)\n return interp_array", "def test_patch_data_2_gaps(self, forcing_processor):\n forcing_processor.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, 9, 0, 0), 215.0),\n (datetime.datetime(2011, 9, 25, 10, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 11, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 12, 0, 0), 230.0),\n (datetime.datetime(2011, 9, 25, 13, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 14, 0, 0), 250.0),\n ]\n forcing_processor.interpolate_values = Mock()\n with patch('bloomcast.utils.log') as mock_log:\n forcing_processor.patch_data('air_temperature')\n expected = [\n (('air_temperature data patched for 2011-09-25 10:00:00',),),\n (('air_temperature data patched for 2011-09-25 11:00:00',),),\n (('air_temperature data patched for 2011-09-25 13:00:00',),),\n (('3 air_temperature data values patched; '\n 'see debug log on disk for details',),),\n ]\n assert mock_log.debug.call_args_list == expected\n expected = [(('air_temperature', 1, 2),), (('air_temperature', 4, 4),)]\n assert forcing_processor.interpolate_values.call_args_list == expected", "def filter(df: pd.DataFrame = pd.DataFrame()):\n if df.empty:\n df = read()\n print('Filtering data...')\n df = df.dropna()\n df2 = pd.DataFrame()\n df2['Longitude'] = df['Longitude']\n df2['Latitude'] = df['Latitude']\n df2['Month'] = df['Date'].dt.strftime('%m').astype(int)\n df2['Day'] = df['Date'].dt.strftime('%d').astype(int)\n df2['Day_of_Week'] = df['Day_of_Week']\n df2['Time'] = np.array([t.timestamp() for t in df['Time']]) - df['Time'].min().timestamp()\n df2['Weather_Conditions'] = df['Weather_Conditions']\n return pd.get_dummies(df2)", "def correct_weather_data(df):\n\n columns = {'Date UTC': 'date',\n 'T° (C)': 'temperature',\n 'P (hPa)': 'pression',\n 'HR (%)': 'HR',\n 'P.rosée (°C)': 'rosee',\n 'Visi (km)': 'visibilite',\n 'Vt. moy. (km/h)': 'v_moy',\n 'Vt. raf. (km/h)': 'v_raf',\n 'Vt. dir (°)': 'v_dir',\n 'RR 3h (mm)': 'RR3h',\n 'Neige (cm)': 'neige',\n 'Nebul. (octats)': 'nebul'}\n\n df = df.rename(columns=columns)\n df['date'] = df['date'].str.replace('h', ':')\n df['date'] = pd.to_datetime(df['date'], dayfirst=True)\n\n return df", "def add_timing_info(df):\n\n # This is so hacky. 
There has to be a better way.\n df[\"hour\"] = None\n df.loc[:, \"hour\"] = [str(x) if x > 10 else \"0\" + str(x)\n for x in df.index.hour]\n num_measurements_per_hour = df.groupby(\n [df.index.date, df[\"hour\"]]).count().reset_index()\n hours_to_use_df = num_measurements_per_hour[num_measurements_per_hour[\"Orientation\"] == 720]\n hours_to_use_str = hours_to_use_df[\"level_0\"].astype(\n str) + \" \" + hours_to_use_df[\"hour\"].astype(str)\n hours_to_use_datetime = pd.to_datetime(\n hours_to_use_str, format='%Y-%m-%d %H')\n\n df[\"complete_hour\"] = 0\n df.loc[df.index.floor(\"H\").isin(\n hours_to_use_datetime), \"complete_hour\"] = 1\n\n min_night_length = 720 * 5\n df_num = df.groupby(\"sleep_night\")[\"hour\"].count()\n df_num_to_use = df_num.loc[df_num >= min_night_length]\n\n df[\"complete_night\"] = 0\n df.loc[df[\"sleep_night\"].isin(df_num_to_use.index), \"complete_night\"] = 1\n\n # Counts number of time points since position started\n df[\"time_since_pos_start\"] = df.groupby([\"sleep_night\", (df[\"orient_bin\"] != df[\"orient_bin\"].shift()).cumsum()]).cumcount() + 1\n\n return df", "def filldf(df, response, sorted_selection, params_selection, constant=True, verbose=True):\n selections_iter = iter(sorted_selection)\n params_iter = iter(params_selection)\n idxmissing = df[response][df[response].isnull() == True].index # slect where their is missing data\n\n print(\"Filling .... \")\n\n while len(idxmissing) > 0:\n print(\"Their is [\" + str(len(idxmissing)) + \"] events missing\")\n\n try: # Try if their is still other stations to fill with\n selection = next(selections_iter)\n param = next(params_iter)\n except StopIteration:\n print(\"NO MORE SELECTED STATIONS\")\n break\n\n try:\n Y = df.loc[:, response]\n X1 = df.loc[:, selection[0]]\n X2 = df.loc[:, selection[1]]\n select = pd.concat([X1, X2], keys=['X1', 'X2'], axis=1, join='inner').dropna()\n if constant:\n newdata = param[0] + param[1] * select['X1'] + param[2] * select['X2'] # reconstruct the data\n else:\n newdata = param[0] * select['X1'] + param[1] * select['X2'] # reconstruct the data\n\n df.loc[idxmissing, response] = newdata.loc[idxmissing]\n idxmissing = df[response][df[response].isnull() == True].index # slect where their is missing data\n except KeyError:\n if verbose:\n print('Selected stations ' + str(selection) + 'did not fill any events')\n else:\n pass\n\n except ValueError:\n if verbose:\n print('The variable ' + var + \"Does not exist or no data to do the multilinear regression \")\n else:\n pass\n\n return df.loc[:, response]", "def interpolate(self, data):\n idx = next((index for index, val in enumerate(self.currentDataset[\"results\"][\"time\"])\n if val >= self.playbackTime), None)\n\n if idx is None:\n self._logger.info(u\"interpolate(): Error no entry found for t={0}\".format(self.playbackTime))\n return None\n else:\n if len(data.shape) == 1:\n return data[idx]\n elif len(data.shape) == 2:\n return data[idx, :]\n else:\n self._logger.info(u\"interpolate(): Error Dimension {0} not understood.\".format(data.shape))\n return None", "def _update_data_range(self):\r\n self._h_min = np.min(self.h)\r\n self._h_max = np.max(self.h)\r\n self._hr_min = np.min(self.hr)\r\n self._hr_max = np.max(self.hr)\r\n self._m_min = np.nanmin(self.m)\r\n self._m_max = np.nanmax(self.m)\r\n\r\n if self.temperature is None or np.all(np.isnan(self.temperature)):\r\n self._T_min = np.nan\r\n self._T_max = np.nan\r\n else:\r\n self._T_min = np.nanmin(self.temperature)\r\n self._T_max = np.nanmax(self.temperature)\r\n\r\n 
return", "def temperature_only_data_prep(observations, predictors, for_prediction=False,\n verbose=True):\n predictors = predictors[['doy', 'site_id', 'year', 'temperature']].copy()\n doy_series = predictors.doy.dropna().unique()\n doy_series.sort()\n predictors = predictors.pivot_table(index=['site_id', 'year'], columns='doy', values='temperature').reset_index()\n\n # This first and last day of temperature data can causes NA issues because\n # of leap years.If thats the case try dropping them\n first_doy_has_na = predictors.iloc[:, 2].isna().any() # first day will always be col 2\n if first_doy_has_na:\n first_doy_column = predictors.columns[2]\n predictors.drop(first_doy_column, axis=1, inplace=True)\n doy_series = doy_series[1:]\n warn(\"\"\"Dropped temperature data for doy {d} due to missing data. Most likely from leap year mismatch\"\"\".format(d=first_doy_column))\n\n last_doy_index = predictors.shape[1] - 1\n last_doy_has_na = predictors.iloc[:, last_doy_index].isna().any()\n if last_doy_has_na:\n last_doy_column = predictors.columns[-1]\n predictors.drop(last_doy_column, axis=1, inplace=True)\n doy_series = doy_series[:-1]\n warn(\"\"\"Dropped temperature data for doy {d} due to missing data. Most likely from leap year mismatch\"\"\".format(d=last_doy_column))\n\n # Dont need the doy column if it's present and prediction is being done\n if for_prediction and 'doy' in observations.columns:\n observations = observations.drop('doy', axis=1)\n # Give each observation a temperature time series\n obs_with_temp = observations.merge(predictors, on=['site_id', 'year'], how='left')\n\n # Deal with any site/years that don't have temperature data\n original_sample_size = len(obs_with_temp)\n rows_with_missing_data = obs_with_temp.isnull().any(axis=1)\n missing_info = obs_with_temp[['site_id', 'year']][rows_with_missing_data].drop_duplicates()\n if len(missing_info) > 0:\n obs_with_temp.dropna(axis=0, inplace=True)\n n_dropped = original_sample_size - len(obs_with_temp)\n warn('Dropped {n0} of {n1} observations because of missing data'.format(n0=n_dropped, n1=original_sample_size) +\n '\\n Missing data from: \\n' + str(missing_info))\n\n temperature_array = obs_with_temp[doy_series].values.T\n\n if for_prediction:\n return temperature_array, doy_series\n else:\n observed_doy = obs_with_temp.doy.values\n return observed_doy, temperature_array, doy_series", "def get_data(temp_estimate_source='historic'):\n\n print 'get data from files'\n load = process_load_data(datafoldername+loadfilename_train)\n load_test = process_load_data(datafoldername+loadfilename_test)\n temp = process_temp_data(datafoldername+tempfilename_train)\n holidays = process_holiday_data(datafoldername+holidayfilename)\n\n print 'merge load with temp data'\n X_train_df = load.merge(temp, on='datetime', how='left')\n X_test_df = load_test.merge(temp, on='datetime', how='left')\n\n print 'estimate missing temps'\n # find rows with missing temperatures\n missingtemp = X_test_df[X_test_df.isnull().any(axis=1)][['datetime', 'zone_id']].copy()\n\n # source estimates for missing periods\n if temp_estimate_source == 'arima':\n # use preprocessed arima estimates\n estimatedtemps = process_arima_temp_data(datafoldername+arimafilename)\n elif temp_estimate_source == 'actuals':\n # use actual temperatures - as provided after conclusion of kaggle competition\n estimatedtemps = process_temp_data(datafoldername+tempfilename_solution)\n else:\n # use means of historical temps at same day/time.\n estimatedtemps = 
get_estimated_temps(missingtemp[['datetime']].drop_duplicates(), temp)\n\n # merge estimates against missing rows, and use to update original dataframe in place\n replacementtemps = missingtemp.merge(estimatedtemps, left_on='datetime', right_on='datetime', how='left')\n replace_unknown_temps(X_test_df, replacementtemps)\n\n print 'merge in holiday dates'\n X_train_df = X_train_df.merge(holidays, on='datetime', how='left')\n X_train_df['holiday'].fillna(0, inplace=True)\n X_test_df = X_test_df.merge(holidays, on='datetime', how='left')\n X_test_df['holiday'].fillna(0, inplace=True)\n\n print 'add datetime categorical variables'\n X_train_df = add_datetime_categories(X_train_df)\n X_test_df = add_datetime_categories(X_test_df)\n\n return X_train_df, X_test_df", "def interpolate_triplet(r,g,b,timestamps, start, stop):\n numdays = (stop-start).days\n hires_dates = [start + datetime.timedelta(days=x) for x in range(0,numdays)]\n hires_time = [time.mktime(date.timetuple()) for date in hires_dates]\n \n # interpolate r,g,b\n R = np.clip(np.interp(hires_time, timestamps, r),0,1)\n G = np.clip(np.interp(hires_time, timestamps, g),0,1)\n B = np.clip(np.interp(hires_time, timestamps, b),0,1)\n \n return list(zip(R,G,B))", "def linear_interpolate_value_at_time(t0, v0, t1, v1, t):\n return v0 + linear_interpolate_value_change(t0, v0, t1, v1, t - t0)", "def recreate_sampling_times(\n data: DataFrame,\n step_length: float,\n start_time: float,\n end_time: float,\n plot_col=None,\n) -> DataFrame:\n\n first_time_in_df = data[DFKeys.TIME.value].iloc[0]\n if start_time < first_time_in_df:\n raise ValueError(\"start time cannot precede first time in df\")\n\n get_shifted_time = lambda row: row[DFKeys.TIME.value] - start_time\n shifted_timestamps = data.apply(get_shifted_time, axis=1).rename(\n DFKeys.TIME.value, axis=1\n )\n\n duration = end_time - start_time\n timesteps = np.arange(0, duration, step_length)\n new_columns = [pd.Series(timesteps, name=DFKeys.TIME.value)]\n columns_except_time = data.columns.difference(\n [\n DFKeys.TIME.value,\n \"child_frame_id\",\n \"header.frame_id\",\n \"header.seq\",\n \"header.stamp.nsecs\",\n \"header.stamp.secs\",\n \"pose.covariance\",\n \"twist.covariance\",\n \"pins_0\",\n \"pins_1\",\n \"pins_2\",\n \"pins_3\",\n \"pins_4\",\n \"pins_5\",\n \"pins_6\",\n \"pins_7\",\n ]\n )\n\n for col_name in columns_except_time:\n f = interp1d(shifted_timestamps.values, data[col_name].values)\n new_columns.append(pd.Series(f(timesteps), name=col_name))\n\n data_new = pd.concat(new_columns, axis=1)\n\n if plot_col in data.columns:\n SAVEDIR = Path(\"results/interpolation\")\n sea.set_style(\"white\")\n # plt.figure(figsize=(5, 2.5))\n sea.lineplot(x=shifted_timestamps.values, y=data[plot_col], label=\"original\")\n sea.lineplot(\n x=DFKeys.TIME.value, y=plot_col, data=data_new, label=\"interpolated\"\n )\n # plt.ylabel(\"Velocity\")\n # plt.savefig(SAVEDIR.joinpath(\"%s.pdf\" % plot_col))\n plt.show()\n\n return data_new", "def interpolate_quaternions(datapacket_quaternions, datapacket_timestamps, path):\n print(\"Interpolating: \" + path + \"quaternions_datapacket.csv\")\n previous_quaternion = np.asarray([1000, 1000, 1000, 1000]) # initialization of variable\n previous_time = 0\n interpolated_quaternions = []\n timestamp = []\n equal_quaternions = []\n for quat, time in zip(datapacket_quaternions, datapacket_timestamps):\n if np.array_equal(previous_quaternion, quat):\n if not equal_quaternions:\n equal_quaternions.append(previous_quaternion)\n interpolated_quaternions.pop()\n 
timestamp.append(previous_time)\n equal_quaternions.append(quat)\n timestamp.append(time)\n\n else:\n if equal_quaternions:\n diff_quat = quat - previous_quaternion\n diff_time = time - timestamp[0]\n a = diff_quat / diff_time # slope\n b = equal_quaternions[0] - a * timestamp[0] # y-intercept\n for t in timestamp:\n q = a * t + b # y = ax + b\n interpolated_quaternions.append(q)\n interpolated_quaternions.append(quat)\n equal_quaternions = []\n timestamp = []\n previous_quaternion = quat[:]\n previous_time = time\n\n else:\n interpolated_quaternions.append(quat)\n previous_quaternion = quat[:]\n previous_time = time\n if equal_quaternions:\n # appending the last quaternions if they are all equal\n for quat in equal_quaternions:\n interpolated_quaternions.append(quat)\n return interpolated_quaternions", "def get_gridcell_history(\n lat,\n lon,\n dataset,\n also_return_snapped_coordinates=False,\n also_return_metadata=False,\n use_imperial_units=True,\n convert_to_local_time=True,\n as_of=None,\n ipfs_timeout=None):\n try:\n metadata = get_metadata(get_heads()[dataset])\n except KeyError:\n raise DatasetError(\"No such dataset in dClimate\")\n\n # set up units\n converter, dweather_unit = get_unit_converter(metadata[\"unit of measurement\"], use_imperial_units)\n\n # get dataset-specific \"no observation\" value\n missing_value = metadata[\"missing value\"]\n try:\n dataset_obj = GRIDDED_DATASETS[dataset](as_of=as_of, ipfs_timeout=ipfs_timeout)\n except KeyError:\n raise DatasetError(\"No such dataset in dClimate\")\n\n try:\n (lat, lon), resp_series = dataset_obj.get_data(lat, lon)\n\n except (ipfshttpclient.exceptions.ErrorResponse, ipfshttpclient.exceptions.TimeoutError, KeyError, FileNotFoundError) as e:\n raise CoordinateNotFoundError(\"Invalid coordinate for dataset\")\n\n # try a timezone-based transformation on the times in case we're using an hourly set.\n if convert_to_local_time:\n try:\n tf = TimezoneFinder()\n local_tz = pytz.timezone(tf.timezone_at(lng=lon, lat=lat))\n resp_series = resp_series.tz_localize(\"UTC\").tz_convert(local_tz)\n except (AttributeError, TypeError): # datetime.date (daily sets) doesn't work with this, only datetime.datetime (hourly sets)\n pass\n\n if type(missing_value) == str:\n resp_series = resp_series.replace(missing_value, np.NaN).astype(float)\n else:\n resp_series.loc[resp_series.astype(float) == missing_value] = np.NaN\n resp_series = resp_series.astype(float)\n \n resp_series = resp_series * dweather_unit\n if converter is not None:\n resp_series = pd.Series(converter(resp_series.values), resp_series.index)\n result = {k: convert_nans_to_none(v) for k, v in resp_series.to_dict().items()}\n \n if also_return_metadata:\n result = tupleify(result) + ({\"metadata\": metadata},)\n if also_return_snapped_coordinates:\n result = tupleify(result) + ({\"snapped to\": (lat, lon)},)\n return result", "def merge_weather_trails(df_weather, df_hike):\n df_trail_year = pd.merge(\n df_hike, df_weather, how='left', left_on=[\n 'closet_station', 'last_year'], right_on=[\n 'name', 'DATE'])\n df_all_clean = df_trail_year.drop(['DATE', 'name'], axis=1)\n return df_all_clean", "def testWeatherFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'weather',\n orderBy = [timeCol],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')", "def ffill_with_timelimit(df, time_limit): \n #--Setup\n #Convert time limit to correct 
format\n if (type(time_limit) == str):\n tl = pd.to_timedelta(time_limit)\n else:\n tl = time_limit\n cid, ts, ent = df.columns #Store colnames for easy reference\n \n #Computation\n dfi = time_since_last(df).to_frame() #Get time since last measurement as a df for internal usage\n dfi['ffil_ok'] = dfi['TSL'] <= tl #Mask whether we should ffil\n dfi['ful_ffil'] = df.groupby(cid)[ent].ffill()\n dfi.loc[dfi['ffil_ok'], 'out'] = dfi.loc[dfi['ffil_ok'], 'ful_ffil'] \n \n return dfi['out']" ]
[ "0.7080858", "0.68412477", "0.6801546", "0.67576146", "0.6569322", "0.6546012", "0.6509167", "0.6458176", "0.6420076", "0.6409065", "0.6344805", "0.6284957", "0.62729025", "0.6232209", "0.6207936", "0.6158501", "0.61142576", "0.6104147", "0.606371", "0.5919506", "0.59161085", "0.5889237", "0.58862394", "0.5869328", "0.58683664", "0.58384943", "0.58332586", "0.5822401", "0.58221453", "0.58218294", "0.5805339", "0.5792186", "0.5791326", "0.57804805", "0.5764508", "0.574721", "0.57144666", "0.5709975", "0.57015973", "0.56902367", "0.5676393", "0.5650466", "0.56258166", "0.56087506", "0.5606053", "0.55853754", "0.55834293", "0.5577712", "0.5554948", "0.55509156", "0.5530902", "0.5526783", "0.5521849", "0.551724", "0.5481171", "0.5479413", "0.5471764", "0.5469204", "0.5460419", "0.54228324", "0.5401167", "0.53891", "0.538095", "0.5376866", "0.5349452", "0.53432846", "0.53206664", "0.53136253", "0.53136206", "0.5311051", "0.5277728", "0.52629256", "0.52618325", "0.52562505", "0.5254339", "0.52488846", "0.5243777", "0.5236779", "0.52364105", "0.5235848", "0.5226119", "0.5225789", "0.52242655", "0.5219255", "0.52138424", "0.5213462", "0.5205815", "0.5186382", "0.518212", "0.5171268", "0.51658595", "0.51625603", "0.5158347", "0.5157532", "0.51569396", "0.5152532", "0.51388395", "0.51373297", "0.5121002", "0.5101958" ]
0.8054154
0
Convert timestamps to timestamp objects, fill in blanks in weather data, add names of meter types
def clean_data(raw_data, names=const.NAMES, meter_map=const.METER_MAP): cleaned_data = {} local_names = names.copy() if 'building_metadata' in local_names: local_names.remove('building_metadata') for name in local_names: print(f'Cleaning {name} dataset') df = raw_data[name] df.timestamp = pd.to_datetime(df.timestamp) if name.startswith('weather'): df = add_missing_weather_data(df) elif name in ['train', 'test']: df['meter_type'] = df['meter'].map(meter_map) cleaned_data[name] = df cleaned_data['building_metadata'] = raw_data['building_metadata'] return cleaned_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_time(data, metadata):\n timestamp_name = metadata[\"timestamp_name\"]\n if timestamp_name == \"\":\n timestamp_name = \"fake_ts\"\n data[timestamp_name] = data.index\n\n data[timestamp_name] = pd.to_datetime(data[timestamp_name])\n data.sort_values(by=[timestamp_name], inplace=True)\n data.set_index([timestamp_name], inplace=True)\n\n return data", "def fix_time_fields(self):\n time_fields = {\"Time of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "def clean_meteo_data(self, df):\n for col in df.columns:\n df[col] = df[col].str.replace(',', '.').astype(\"float\")\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n df=df.fillna(method='ffill')\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n# print(\"shape selected sensor data:\",df.shape)\n df=df.dropna()\n df=df.resample(\"10T\").mean()\n df=df.reset_index()\n df['dag']=df['datetime'].dt.day\n return df", "def test_parse_weather_two_missing_time(self):\n data = copy.deepcopy(self.weather_two)\n\n # Remove a time entry.\n del data['data'][0]['time']\n\n actual = timeseries.parse_weather(data)\n\n # We'll have a NaN in the Index.\n self.assertTrue(actual.index.isna().any())", "def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 :\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n 
self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n self.swaths[self.component]['Geolocation Fields']['Datetime'] = var", "def OPCtimetransform(data, to):\n \n remove_times = []\n outtimes = []\n times = {'ms':[],'SS':[],'MM':[],'HH':[]}\n\n for i in range(0, len(data)):\n times['HH'] = 0\n times['MM'] = 0\n times['SS'] = 0\n times['ms'] = 0\n\n item = data[i]\n \n try:\n if len(item.split('.')[1]) < 2:\n item += '0'\n except IndexError:\n item += '.00'\n if len(item) < 9:\n item = item.zfill(9)\n if int(item[:2]) > 23:\n item = '0' + item\n \n # remove items with extra zero (2319010.00 to 231910)\n if len(item) > 9:\n olditem = item\n newitem = item[:4] + item[5:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n item = newitem\n else:\n pass\n try:\n md = dt.datetime.strptime(item, \"%H%M%S.%f\")\n \n # round off items which exceed 59 minutes or 59 seconds \n # (i.e. 146001 to 150001.)\n except ValueError:\n \n try:\n times['HH'] = int(item[0:2])\n times['MM'] = int(item[2:4])\n times['SS'] = int(item[4:6])\n times['ms'] = int(item[7:9])\n except ValueError:\n print(i, item)\n\n if times['SS'] > 59:\n times['MM'] += 1\n times['SS'] = 0\n if times['MM'] > 59:\n times['HH'] += 1\n times['MM'] = 0\n # discard items which exceed 23 hours\n if times['HH'] > 23:\n times['HH'] = 23\n print( ('resetting value %s')%(item) )\n \n\n md = dt.datetime(1900,1,1,times['HH'], times['MM'], times['SS']) \n\n \n outtimes.append( dt.datetime.strftime(md, to) )\n\n return outtimes", "def streaming_weather_data(**kwargs):\n df = weather_data(['San Francisco'])\n df['time'] = [pd.Timestamp.now()]\n return df.set_index('time')", "def format_data(self, raw_data):\n opz = raw_data.copy()\n opz['datetime'] = pd.to_datetime(opz['Datum-tijd'], format='%Y-%m-%dT%H:%M:%SZ')\n opz.drop(['Datum-tijd'],axis=1, inplace=True)\n opz['dag']=opz['datetime'].dt.day\n opz['tijd'] = opz['datetime'].dt.time\n #voeg open/dicht data toe en bepaal momenten waarop dit wisselt\n opz['Opzetstuk Noord (°)'] = opz['Opzetstuk Noord (°)'].str.replace(',', '.').astype(float)\n opz['Opzetstuk Zuid (°)'] = opz['Opzetstuk Zuid (°)'].str.replace(',', '.').astype(float)\n opz['Opzetstuk Noord (°)'].fillna(opz['Opzetstuk Zuid (°)'], inplace=True)\n opz['Opzetstuk Zuid (°)'].fillna(opz['Opzetstuk Noord (°)'], inplace=True)\n return opz", "def test_format_data(self, meteo):\n meteo.config.climate.meteo.station_id = '889'\n meteo.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, i, 0, 0), 215.0)\n for i in range(24)]\n line = next(meteo.format_data('air_temperature'))\n assert line == '889 2011 09 25 42' + ' 215.00' * 24 + '\\n'", "def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather", "def correct_weather_data(df):\n\n columns = {'Date UTC': 'date',\n 'T° (C)': 'temperature',\n 'P (hPa)': 'pression',\n 'HR (%)': 'HR',\n 'P.rosée (°C)': 'rosee',\n 'Visi (km)': 'visibilite',\n 'Vt. moy. (km/h)': 'v_moy',\n 'Vt. raf. 
(km/h)': 'v_raf',\n 'Vt. dir (°)': 'v_dir',\n 'RR 3h (mm)': 'RR3h',\n 'Neige (cm)': 'neige',\n 'Nebul. (octats)': 'nebul'}\n\n df = df.rename(columns=columns)\n df['date'] = df['date'].str.replace('h', ':')\n df['date'] = pd.to_datetime(df['date'], dayfirst=True)\n\n return df", "def add_timing_info(df):\n\n # This is so hacky. 
values!\")", "def emit_metric(ts, data):\n clean = [cleaner(d) for d in data]\n # print clean\n # check that clean[0] do not start with a number\n (n0,v0) = clean[0]\n if n0 is not None:\n # print 'error: do not understand metric' \n return\n\n if len(clean) == 2:\n (n1,v1) = clean[1]\n return '{0}.{1} {2} {3}'.format(v0, v1, ts, n1)\n elif len(clean) == 3:\n (n1,v1) = clean[1]\n (n2,v2) = clean[2]\n return '{0}.{1}.{2} {3} {4}'.format(v0, v1, v2, ts, n2)", "def data_format(self):\n for description in SENSOR_TYPES:\n if description.key not in self.conditions:\n continue\n attr_key = description.key\n try:\n if description.primary_key in self.raw_data:\n sensor_value = self.raw_data[description.primary_key][\n description.sensor_key\n ]\n # Format sensor for better readability\n if attr_key == ATTR_NEW_VERSION and sensor_value == \"0.0.0.0\":\n sensor_value = \"Latest\"\n elif attr_key == ATTR_UPTIME:\n sensor_value = round(sensor_value / (3600 * 24), 2)\n elif attr_key == ATTR_LAST_RESTART:\n last_restart = dt_util.now() - timedelta(seconds=sensor_value)\n sensor_value = last_restart.strftime(\"%Y-%m-%d %H:%M:%S\")\n elif attr_key == ATTR_STATUS:\n if sensor_value:\n sensor_value = \"Online\"\n else:\n sensor_value = \"Offline\"\n elif (\n attr_key == ATTR_LOCAL_IP and not self.raw_data[\"wan\"][\"online\"]\n ):\n sensor_value = None\n\n self.data[attr_key] = sensor_value\n except KeyError:\n _LOGGER.error(\n (\n \"Router does not support %s field. \"\n \"Please remove %s from monitored_conditions\"\n ),\n description.sensor_key,\n attr_key,\n )\n self.data[attr_key] = None", "def _load_time(self):\n\n time_variables = ('time', 'Times', 'Itime', 'Itime2')\n got_time, missing_time = [], []\n for time in time_variables:\n # Since not all of the time_variables specified above are required, only try to load the data if they\n # exist. We'll raise an error if we don't find any of them though.\n if time in self.ds.variables:\n setattr(self.time, time, self.ds.variables[time][:])\n got_time.append(time)\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[time].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[time], attribute))\n setattr(self.atts, time, attributes)\n else:\n missing_time.append(time)\n\n if len(missing_time) == len(time_variables):\n warn('No time variables found in the netCDF.')\n else:\n if 'Times' in got_time:\n # Overwrite the existing Times array with a more sensibly shaped one.\n self.time.Times = np.asarray([''.join(t.astype(str)).strip() for t in self.time.Times])\n\n # Make whatever we got into datetime objects and use those to make everything else. 
Note: the `time' variable\n # is often the one with the lowest precision, so use the others preferentially over that.\n if 'Times' not in got_time:\n if 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n try:\n self.time.Times = np.array([datetime.strftime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in _dates])\n except ValueError:\n self.time.Times = np.array([datetime.strftime(d, '%Y/%m/%d %H:%M:%S.%f') for d in _dates])\n # Add the relevant attribute for the Times variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Times', attributes)\n\n if 'time' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n self.time.time = date2num(_dates, units='days since 1858-11-17 00:00:00')\n # Add the relevant attributes for the time variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'long_name', 'time')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'time', attributes)\n\n if 'Itime' not in got_time and 'Itime2' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n _datenum = date2num(_dates, units='days since 1858-11-17 00:00:00')\n self.time.Itime = np.floor(_datenum)\n self.time.Itime2 = (_datenum - np.floor(_datenum)) * 1000 * 60 * 60 # microseconds since midnight\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime', attributes)\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'msec since 00:00:00')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime2', attributes)\n\n # Additional nice-to-have time representations.\n if 'Times' in got_time:\n try:\n self.time.datetime = np.array([datetime.strptime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in self.time.Times])\n except ValueError:\n self.time.datetime = np.array([datetime.strptime(d, '%Y/%m/%d %H:%M:%S.%f') for d in self.time.Times])\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'Python datetime.datetime')\n setattr(self.atts, 'datetime', 
attributes)\n else:\n self.time.datetime = _dates\n self.time.matlabtime = self.time.time + 678942.0 # convert to MATLAB-indexed times from Modified Julian Date.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'MATLAB datenum')\n setattr(self.atts, 'matlabtime', attributes)\n\n # Clip everything to the time indices if we've been given them. Update the time dimension too.\n if 'time' in self._dims:\n if all([isinstance(i, (datetime, str)) for i in self._dims['time']]):\n # Convert datetime dimensions to indices in the currently loaded data.\n self._dims['time'][0] = self.time_to_index(self._dims['time'][0])\n self._dims['time'][1] = self.time_to_index(self._dims['time'][1]) + 1 # make the indexing inclusive\n for time in self.obj_iter(self.time):\n setattr(self.time, time, getattr(self.time, time)[self._dims['time'][0]:self._dims['time'][1]])\n self.dims.time = len(self.time.time)", "def convert_timestamp_to_object(data):\n for k, value in data.items():\n value_type = value.split(\"::\", 1)[0]\n if value_type == \"datetime\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = datetime.fromtimestamp(timestamp)\n elif value_type == \"date\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = date.fromtimestamp(timestamp)\n data[k] = value\n return data", "def prepare_data(self, data):\n for i, v in data.items():\n field_type = self.get_field_type(i)\n #log.info('i = %s, type = %s', i, field_type)\n if field_type == 'datetime' and isinstance(v, (str, unicode)):\n data[i] = datetime_from_string(v)\n return data", "def test_parse_weather_two_missing_temperature(self):\n data = copy.deepcopy(self.weather_two)\n\n # Remove a temperature entry.\n del data['data'][1]['TowerDryBulbTemp']\n\n actual = timeseries.parse_weather(data)\n\n # We'll have a NaN.\n self.assertTrue(actual['temperature'].isna().any())", "def test_temperatures_when_data_present(self):\n\n temp_data = [(1.00, time.localtime()), (2.00, time.localtime()),\n (3.00, time.localtime()), (4.00, time.localtime())]\n\n tt = TemperatureTracker(temp_data)\n result = tt.temperatures()\n for i in range(0, len(result)):\n self.assertEqual(result[i][0], temp_data[i][0])\n self.assertEqual(result[i][1], temp_data[i][1])", "def _prep_times(self):\n self.test_times = 'diagonal'\n if hasattr(self, 'times'):\n self.train_times = self.times\n if hasattr(self, 'times_'):\n self.train_times_ = self.times_\n self.test_times_ = _DecodingTime()\n self.test_times_['slices'] = [[slic] for slic in\n self.train_times_['slices']]\n self.test_times_['times'] = [[tim] for tim in\n self.train_times_['times']]\n if hasattr(self, 'scores_'):\n self.scores_ = [[score] for score in self.scores_]\n if hasattr(self, 'y_pred_'):\n self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]", "def format_odometer(raw) -> dict:\r\n instruments: dict = {}\r\n for instrument in raw:\r\n instruments[instrument[\"type\"]] = instrument[\"value\"]\r\n if \"unit\" in instrument:\r\n instruments[instrument[\"type\"] + \"_unit\"] = instrument[\"unit\"]\r\n\r\n return instruments", "def generate_time_data(self):\n # generate random dates and append to a list\n sd = self.start_date\n ed = self.end_date\n dates = [random_date(start=sd, end=ed) for d in range(0, self.obs)]\n\n # convert to ISO 8601 format and update \"Local Time\" field\n self.output['Local Time'] = map(lambda x: x.isoformat(), dates)", "def __init__(self, location, type, log_df = TWEET_HISTORY_DF):\n self.location = location\n self.type = type\n self.lat = 
c.LOCATIONS[self.location]['lat']\n self.lon = c.LOCATIONS[self.location]['lon']\n self.sunsetwx_response = py_sunsetwx.get_quality(self.lat, self.lon, self.type)\n self.log_df = log_df \n \n ## Find the time of the sunrise/sunset\n #### Lookup civil time of \"dawn\" if sunrise, \"dusk\" if sunset\n if type == 'sunrise':\n self.dawn_dusk = 'dawn'\n elif type == 'sunset':\n self.dawn_dusk = 'dusk'\n self.utc_time = pytz.utc.localize(datetime.strptime(self.sunsetwx_response['features'][0]['properties'][self.dawn_dusk]['civil'], '%Y-%m-%dT%H:%M:%SZ'))\n self.time_converted = self.utc_time.astimezone(pytz.timezone(c.LOCATIONS[location]['timezone']))", "def _perform_data_conversion(self):\n self.data = []\n items = 0\n for value in self.elements_to_convert:\n try:\n location = parse_int(value.get('location_id'), nullable=False)\n if not value.get('list', []):\n continue\n for obs in value['list']:\n items += 1\n # Setting timezone to pytz.UTC FIXES [BUG-039].\n timestamp = parse_date_utc(obs.get('dt') * 1000)\n date = timestamp.date()\n time = timestamp.time()\n temperature = parse_int(obs['main'].get('temp'))\n pressure = parse_float(obs['main'].get('pressure'))\n humidity = parse_int(obs['main'].get('humidity'))\n wind_speed = parse_int(obs.get('wind', {}).get('speed'))\n wind_degrees = parse_int(obs.get('wind', {}).get('deg'))\n wind_direction = compute_wind_direction(wind_degrees)\n weather = obs.get('weather', [{}])[0]\n if weather.get('icon') and weather.get('id'):\n weather = - parse_int(weather.get('id'), nullable=False) if 'n' in weather['icon'] else \\\n parse_int(weather.get('id'), nullable=False)\n self.data.append(WeatherForecastObservation(location_id=location, date=date, time=time,\n temperature=temperature, pressure=pressure, humidity=humidity, wind_speed=wind_speed,\n wind_degrees=wind_degrees, wind_direction=wind_direction, weather_id=weather))\n except (ValueError, AttributeError, KeyError, IndexError, TypeError):\n _id = value.get('_id', 'Unknown ID')\n self.logger.exception('An error occurred while parsing data. WeatherForecastObservation with ID \"%s\" '\n 'will not be converted.' % _id)\n self.state['elements_to_convert'] = items", "def get_weather(self, time=None, location=None):\n req = requests.get(self.source_url)\n text = req.text\n moment = self.extract_datetime(text)\n met_data = self.parse_hms_data(text)\n met_data['time'] = moment\n met_data['text'] = text\n return self.source_label, met_data", "def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second", "def convert_generic_timestamps(result: ResponseObject) -> ResponseObject:\n # Format inner record if present, e.g. 
for search results\n if 'record' in result:\n result['record'] = convert_generic_timestamps(result['record'])\n return result\n\n for field in GENERIC_TIME_FIELDS:\n datetime_obj = try_datetime(result.get(field, ''))\n if datetime_obj:\n result[field] = datetime_obj\n return result", "def test_fits_to_time_meta(self, table_types):\n t = table_types()\n t['a'] = Time(self.time, format='isot', scale='utc')\n t.meta['DATE'] = '1999-01-01T00:00:00'\n t.meta['MJD-OBS'] = 56670\n\n # Test for default write behaviour (full precision) and read it\n # back using native astropy objects; thus, ensure its round-trip\n t.write(self.temp('time.fits'), format='fits', overwrite=True)\n tm = table_types.read(self.temp('time.fits'), format='fits',\n astropy_native=True)\n\n # Test DATE\n assert isinstance(tm.meta['DATE'], Time)\n assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)'\n assert tm.meta['DATE'].format == 'fits'\n # Default time scale according to the FITS standard is UTC\n assert tm.meta['DATE'].scale == 'utc'\n\n # Test MJD-xxx\n assert isinstance(tm.meta['MJD-OBS'], Time)\n assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']\n assert tm.meta['MJD-OBS'].format == 'mjd'\n assert tm.meta['MJD-OBS'].scale == 'utc'\n\n # Explicitly specified Time Scale\n t.meta['TIMESYS'] = 'ET'\n\n t.write(self.temp('time.fits'), format='fits', overwrite=True)\n tm = table_types.read(self.temp('time.fits'), format='fits',\n astropy_native=True)\n\n # Test DATE\n assert isinstance(tm.meta['DATE'], Time)\n assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)'\n assert tm.meta['DATE'].scale == 'utc'\n\n # Test MJD-xxx\n assert isinstance(tm.meta['MJD-OBS'], Time)\n assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']\n assert tm.meta['MJD-OBS'].scale == FITS_DEPRECATED_SCALES[t.meta['TIMESYS']]\n\n # Test for conversion of time data to its value, as defined by its format\n t['a'].info.serialize_method['fits'] = 'formatted_value'\n t.write(self.temp('time.fits'), format='fits', overwrite=True)\n tm = table_types.read(self.temp('time.fits'), format='fits')\n\n # Test DATE\n assert not isinstance(tm.meta['DATE'], Time)\n assert tm.meta['DATE'] == t.meta['DATE']\n\n # Test MJD-xxx\n assert not isinstance(tm.meta['MJD-OBS'], Time)\n assert tm.meta['MJD-OBS'] == t.meta['MJD-OBS']\n\n assert (tm['a'] == t['a'].value).all()", "def test_str_time_1(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"time_1\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x5E,\n 0x1E,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 32071.68)\n self.assertEqual(sensor.unit_of_measurement(), \"s\")\n self.assertEqual(sensor.ha_device_class(), None)", "def format_odometer(raw: list) -> dict:\n instruments: dict = {}\n for instrument in raw:\n instruments[instrument[\"type\"]] = instrument[\"value\"]\n if \"unit\" in instrument:\n instruments[instrument[\"type\"] + \"_unit\"] = instrument[\"unit\"]\n\n return instruments", "def to_WTH_converter(self, weather_data, dest_dir):\n ds_all = weather_data.get_global_dataset()\n if self.country is None:\n print(\"Country given is erroneous:\")\n return\n elif self.country == \"globe\":\n lon_num_start = 0\n lon_num_stop = weather_data.get_num_of_attribute('longitude')\n lat_num_start = 0\n lat_num_stop = weather_data.get_num_of_attribute('latitude')\n else:\n lon_num_start, lon_num_stop, lat_num_start, lat_num_stop = weather_data.get_country_boundary(self.country)\n\n\n # top bottom, left to right\n lon_num_start = 397\n 
lat_num_start = 184\n for lon_i in range(lon_num_start, lon_num_stop + 1):\n # for lon_i in range(lon_num_start, lon_num_stop+1):\n lon = ds_all.longitude.isel(longitude=lon_i).values.tolist()\n\n for lat_i in range(lat_num_start, lat_num_stop+1):\n # for lat_i in range(lat_num_start, lat_num_stop + 1):\n lat = ds_all.latitude.isel(latitude=lat_i).values.tolist()\n\n # create a dynamic header with updated LON, LAT info and move it into the folder given\n wth_header_u = ut.format_header(lat_i + 1, lon_i + 1, lat, lon)\n wth_header = dest_dir + \"/\" + wth_header_u\n shutil.move(wth_header_u, wth_header)\n\n # open in appending mode\n fwth = open(wth_header, \"a+\")\n\n # loop through daily weather data\n for t, date in enumerate(self.years):\n daily_data_vars = ut.get_daily_data_vars(ds_all, lat_i, lon_i, t)\n # disregard all NAN values\n if daily_data_vars is None:\n fwth.close()\n os.remove(wth_header)\n break\n\n if t == 0:\n ut.update_table(wth_header_u, lat, lon)\n\n entry = ut.format_data_vars_entry(daily_data_vars, date)\n\n # append this entry into the file\n fwth.write(entry)\n print(\"Added entry:\", entry)\n\n # close file after writing\n fwth.close()\n print(\"Output WTH:\", wth_header)", "def clean_station_data(station_df):\n # TODO implement data preparation here\n # Fix the datetime field\n\n # Cast to numeric fields where necessary\n\n # Interpolate missing data", "def _convert_asset_timestamp_fields(dict_):\n for key in _asset_timestamp_fields & viewkeys(dict_):\n value = pd.Timestamp(dict_[key], tz='UTC')\n dict_[key] = None if isnull(value) else value\n return dict_", "def extract_times(raw_times_dict):\n actual_times = {}\n if raw_times_dict[\"realtime\"] is not None:\n actual_times[\"realtime\"] = raw_times_dict[\"realtime_t\"]\n\n if raw_times_dict[\"realtime_noloads\"] is not None:\n actual_times[\"realtime_noloads\"] = raw_times_dict[\"realtime_noloads_t\"]\n\n if raw_times_dict[\"ingame\"] is not None:\n actual_times[\"ingame\"] = raw_times_dict[\"ingame_t\"]\n\n return actual_times", "def clean_weather(weather):\n\n weather.replace(\"M\", float(\"NaN\"), inplace=True)\n weather.replace(\"-\", float(\"NaN\"), inplace=True)\n weather.replace(\"T\", float(\"NaN\"), inplace=True)\n weather.replace(\" T\", float(\"NaN\"), inplace=True)\n weather.replace(\" T\", float(\"NaN\"), inplace=True)\n weather.drop(\"CodeSum\", axis=1, inplace=True)\n\n return merge_weather(weather)", "def update_temperature_values(self):\n year = self._current_date.year\n month = self._current_date.month\n\n self.ensure_temperatures(dt.date(year, month, 15))\n self.set_temperature_arrays(dt.date(year, month, 15))", "def convert_object_to_timestamp(data):\n for k, value in data.items():\n if isinstance(value, (datetime, date)):\n value = \"::\".join(\n [type(value).__name__, \"%d\" % time.mktime(value.timetuple())]\n )\n data[k] = value\n return data", "def create(records):\n version = '1.0.0'\n\n iversion = [int(x) for x in version.split('.')]\n if iversion[1] > 0 or iversion[2] > 0:\n raise IOError(\"SEF versions > 0.0 are not supported\")\n\n latitude = 42.331\n longitude = -83.046\n altitude = 'NA'\n\n header = {\n 'SEF': version, 'ID': 'Detroit_Anthon', 'Name': 'Detroit, MI',\n 'Lat': latitude, 'Lon': longitude, 'Alt': altitude, 'Source': 'C3S-DRS',\n 'Link': '', 'Vbl': 'ta', 'Stat': 'point',\n 'Units': 'C', 'Meta': 'Observer=George Christian Anthon',\n }\n\n index_temperatures = 0\n index_times = 0\n\n time_offset = longitude * 12 / 180\n\n temp_dict = defaultdict(list)\n\n temperatures = 
[]\n\n times = [datetime.time(7, 0), datetime.time(12, 0), datetime.time(20, 0)]\n original_time = [\"7:00AM\", \"12:00PM\", \"20:00PM\"]\n\n for index in range(len(records)):\n temperatures.append(records[index][datetime.time(7, 0)])\n temperatures.append(records[index][datetime.time(12, 0)])\n temperatures.append(records[index][datetime.time(20, 0)])\n for time in original_time:\n if isinstance(temperatures[index_temperatures], str):\n value = 'NA'\n else:\n value = round(((float(temperatures[index_temperatures]) - 32) * 5 / 9), 1)\n\n date = str(records[index]['Year']) \\\n + \"-\" \\\n + str(records[index]['Month']) \\\n + \"-\" + str(records[index]['Day']) \\\n + \" \" + str(times[index_times])\n\n date_time = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\n\n utc = date_time - datetime.timedelta(hours=time_offset)\n\n year = str(utc)[:4]\n month = str(utc)[5:7]\n day = str(utc)[8:10]\n hour = str(utc)[11:13]\n minutes = str(utc)[14:16]\n\n data_dict = {\n 'Data': pd.DataFrame({\n 'Year': year,\n 'Month': month,\n 'Day': day,\n 'Hour': hour,\n 'Minute': minutes,\n 'Period': 0,\n 'Value': value,\n 'Meta': \"orig=\" + str(temperatures[index_temperatures])\n + 'F' + \"|orig.time=\" + str(time)\n + \"|orig.date=\" + str(records[index]['Year']) + '-' + str(records[index]['Month'])\n + '-' + str(records[index]['Day'])\n\n }, index=[0])\n }\n temp_dict['Data'].append(data_dict['Data'])\n\n index_times += 1\n if index_times > 2:\n index_times = 0\n\n index_temperatures += 1\n\n header.update(temp_dict)\n\n return header", "def clean_timelog():", "def get_average_weather_data(\n self, timestamps: list, weather: list,\n ) -> list:\n weather_list = weather\n extended_weather_list = []\n\n for timestamp in timestamps:\n before_after = self.__find_nearest_weathers(\n timestamp, weather_list,\n )\n before = before_after['before']\n after = before_after['after']\n # Weight depends on the proximity to both of the nearest\n # Weather objects so that weather can be averaged out.\n weight_a = 1 - (\n before['seconds'] / (after['seconds'] + before['seconds'])\n )\n avg_weather_at_timestamp = AverageWeather(\n before['weather'], after['weather'], weight_a=weight_a,\n )\n extended_weather_list.append(avg_weather_at_timestamp)\n\n return extended_weather_list", "def create_time_s(df, medidor, freq='15T'):\n dates_complete = pd.date_range('1/18/2013', '02/09/2014', freq='15T')\n # this dates take them from the file\n my_complete_series = pd.Series(dates_complete)\n frame1 = my_complete_series.to_frame()\n frame1.columns = ['key']\n merged = pd.merge(frame1, df, on='key', how='outer')\n merged = merged.sort('key')\n # fill the merged file with the number of the meter\n merged['medidor'].fillna(medidor, inplace=True)\n\n return merged", "def postprocessData(meta, units, data):\n\n data['time'] = np.arange(0, meta['dt'] * len(data), meta['dt'])\n units['time'] = 's'\n\n meta, units, data = self.calculateForce(meta, units, data)\n\n data['distance'] = np.sqrt(data.xDist**2 + data.yDist**2)\n units['distance'] = 'nm'\n\n return meta, units, data", "def parse_temperature(prod, regime, lines, data):\n for linenum, line in enumerate(lines):\n if len(line.strip()) < 18:\n continue\n # Repair a broken (E) product, see akrherz/pyIEM#08\n if line[20:23] == \"(E)\" and line[38] == \" \":\n prod.warnings.append(f\"Invalid line repaired |{line}|\")\n line = line.replace(\"(E)\", \"E \")\n tokens = make_tokens(regime, line)\n key = tokens[0].strip().lower()\n if key.upper() not in [\"MAXIMUM\", \"MINIMUM\", 
\"AVERAGE\"]:\n continue\n data[f\"temperature_{key}\"] = get_number(tokens[1])\n if tokens[2] is not None:\n data[f\"temperature_{key}_time\"] = tokens[2]\n if tokens[3] is not None:\n data[f\"temperature_{key}_record\"] = get_number(tokens[3])\n if tokens[4] is not None and tokens[4].strip() not in [\"\", \"M\", \"MM\"]:\n n = get_number_year(tokens[4])\n if n is not None:\n data[f\"temperature_{key}_record_years\"] = [n]\n else:\n prod.warnings.append(f\"Found invalid year |{tokens[4]}|\")\n if tokens[5] is not None:\n data[f\"temperature_{key}_normal\"] = get_number(tokens[5])\n # Check next line(s) for more years\n while (linenum + 1) < len(lines) and len(\n lines[linenum + 1].strip()\n ) == 4:\n line2 = lines[linenum + 1].strip()\n n = get_number_year(line2)\n if n is not None:\n data.setdefault(\n f\"temperature_{key}_record_years\",\n [],\n ).append(n)\n else:\n prod.warnings.append(f\"Found invalid year |{line2}|\")\n linenum += 1", "def prepare_for_influxdb(df):\n df = df.drop(columns=\"landkreis\", errors=\"ignore\") # prevent name collision in get_ags()\n df = get_ags(df)\n df[\"time\"] = df.apply(lambda x: 1000000000*int(datetime.timestamp((pd.to_datetime(x[\"timestamp\"])))), 1)\n df[\"measurement\"] = \"hystreet\"\n df[\"origin\"] = \"https://hystreet.com\"\n df = df.rename(columns={\n 'station_id': '_id',\n 'pedestrians_count': 'pedestrian_count',\n 'state': 'bundesland'\n })\n df['ags'] = pd.to_numeric(df['ags'])\n # import pdb; pdb.set_trace()\n return df", "def convertData(data):\n for candle in data['candles']:\n candle['date'],candle['time'] = convertToEST(candle['datetime'])\n\n return data", "def get_times_and_labels(records, measurement_type):\n if measurement_type == \"w\":\n mean_time = [np.nanmean(w.waiting_times) for w in records]\n title = \"Distributions of waiting times over runtimes\"\n y_axis_label = \"Waiting Times\"\n else:\n mean_time = [np.nanmean(b.blocking_times) for b in records]\n title = \"Distributions of blocking times over runtimes\"\n y_axis_label = \"Blocking Times\"\n return mean_time, title, y_axis_label", "def generate_summary(weather_data):\n# 5 Day Overview\n# The lowest temperature will be 9.4°C, and will occur on Friday 02 July 2021.\n# The highest temperature will be 20.0°C, and will occur on Saturday 03 July 2021.\n# The average low this week is 12.2°C.\n# The average high this week is 17.8°C.\n\n\n Number_of_days=0\n Min_Value=[]\n Date_value=[]\n Max_Value=[]\n\n\n for rows in weather_data: \n if len(rows) != 0:\n Number_of_days = Number_of_days + 1\n Min_Value.append(rows[1])\n Date_value.append(str(rows[0]))\n Max_Value.append(rows[2])\n \n min_temperature,min_position = find_min(Min_Value)\n min_tempe_celcius = convert_f_to_c(min_temperature)\n occur_date_min = convert_date(Date_value[min_position])\n max_temperature,max_position = find_max(Max_Value)\n max_tempe_celcius = convert_f_to_c(max_temperature)\n occur_date_max = convert_date(Date_value[max_position])\n mean_low = calculate_mean(Min_Value)\n mean_low__tempe_celcius = convert_f_to_c(mean_low)\n mean_high = calculate_mean(Max_Value)\n mean_high__tempe_celcius = convert_f_to_c(mean_high)\n\n summary=\"\"\n summary+=f\"{Number_of_days} Day Overview\\n\"\n summary+=f\" The lowest temperature will be {format_temperature(min_tempe_celcius)}, and will occur on {occur_date_min}.\\n\"\n summary+=f\" The highest temperature will be {format_temperature(max_tempe_celcius)}, and will occur on {occur_date_max}.\\n\"\n summary+=f\" The average low this week is 
{format_temperature(mean_low__tempe_celcius)}.\\n\"\n summary+=f\" The average high this week is {format_temperature(mean_high__tempe_celcius)}.\\n\"\n\n return summary", "def make_temperature_map(time: u.s, field, instr, **kwargs):\n plot_settings = {'cmap': cm.get_cmap('inferno')}\n plot_settings.update(kwargs.get('plot_settings', {}))\n bins, bin_range = instr.make_detector_array(field)\n visible = is_visible(instr.total_coordinates, instr.observer_coordinate)\n hist_coordinates, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=visible)\n with h5py.File(instr.counts_file, 'r') as hf:\n try:\n i_time = np.where(u.Quantity(hf['time'],\n get_keys(hf['time'].attrs), ('unit', 'units')) == time)[0][0]\n except IndexError:\n raise IndexError(f'{time} is not a valid time in observing time for {instr.name}')\n weights = np.array(hf['electron_temperature'][i_time, :])\n units = u.Unit(get_keys(hf['electron_temperature'].attrs, ('unit', 'units')))\n hist, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=weights * visible)\n hist /= np.where(hist_coordinates == 0, 1, hist_coordinates)\n meta = instr.make_fits_header(field, instr.channels[0])\n del meta['wavelnth']\n del meta['waveunit']\n meta['bunit'] = units.to_string()\n meta['detector'] = 'Electron Temperature'\n meta['comment'] = 'Column-averaged electron temperature calculated by synthesizAR'\n\n return GenericMap(hist.T, meta, plot_settings=plot_settings)", "def data_to_packet(data, status=None, last_rain=None,\n sensor_map=DEFAULT_SENSOR_MAP,\n battery_map=DEFAULT_BATTERY_MAP):\n\n packet = {}\n packet['usUnits'] = weewx.METRIC\n packet['dateTime'] = int(time.time() + 0.5)\n\n packet['inTemp'] = data['t_in'] # T is degree C\n packet['inHumidity'] = data['h_in'] # H is percent\n packet['outTemp'] = data[sensor_map['outTemp']] \\\n if 'outTemp' in sensor_map else None\n packet['outHumidity'] = data[sensor_map['outHumidity']] \\\n if 'outHumidity' in sensor_map else None\n packet['UV'] = data['uv']\n\n packet['windSpeed'] = data['windspeed']\n if packet['windSpeed'] is not None:\n packet['windSpeed'] *= 1.60934 # speed is mph; weewx wants km/h\n if packet['windSpeed']:\n packet['windDir'] = data['winddir']\n if packet['windDir'] is not None:\n packet['windDir'] *= 22.5 # weewx wants degrees\n else:\n packet['windDir'] = None\n\n packet['windGust'] = data['windgust']\n if packet['windGust'] is not None:\n packet['windGust'] *= 1.60934 # speed is mph; weewx wants km/h\n if packet['windGust']:\n packet['windGustDir'] = data['winddir']\n if packet['windGustDir'] is not None:\n packet['windGustDir'] *= 22.5 # weewx wants degrees\n else:\n packet['windGustDir'] = None\n\n packet['rainTotal'] = data['rain']\n if packet['rainTotal'] is not None:\n packet['rainTotal'] *= 0.06578 # weewx wants cm\n packet['rain'] = weewx.wxformulas.calculate_rain(\n packet['rainTotal'], last_rain)\n\n # station calculates windchill\n packet['windchill'] = data['windchill']\n\n # station reports baromter (SLP)\n packet['barometer'] = data['slp']\n\n # insert values for extra sensors if they are available\n for label in sensor_map:\n packet[label] = data[sensor_map[label]]\n\n # insert values for battery status if they are available\n if status is not None:\n for label in battery_map:\n packet[label] 
= status[battery_map[label]]\n\n return packet", "def temperatures():\n\n return station_9281", "def jsonData_to_dataset_in_timedifference_us(data):\n\n the_cols = ['x', 'y', 'z', 'timestamp', 'label', 'hand', 'annotator']\n the_data = []\n\n for value in data:\n the_raws = []\n the_indxs = []\n idx = 0\n raw_time_us = 0\n for raw in value['raws']:\n raw_time_us += int(raw['timestamp'])/1000\n the_raws.append([raw['x'], raw['y'], raw['z'], int(\n raw_time_us), value['label'], value['hand'], value['annotator']])\n the_indxs.append(idx)\n idx += 1\n the_data.append(pd.DataFrame(the_raws, the_indxs, the_cols))\n return the_data", "def __process_times(raw_array: list, num_row: int):\n\n # format strings used in datetime.time().strftime()\n full_time_format = '%H:%M:%S'\n hours_time_format = '%H:00:00'\n minutes_time_format = '00:%M:00'\n seconds_time_format = '00:00:%S'\n\n # booleans for telling what the integers in the second column represent\n integer_hours = False\n integer_minutes = False\n integer_seconds = False\n\n # boolean for if the second column is comprised of integers\n integer_timestamps = False\n\n # this loop breaks as soon as it finds an integer in the second column\n # if no integer is found, we try to parse it as a pandas.Timestamp,\n # if this fails, the entry is not something we can parse\n for i in range(num_row):\n time_string = raw_array[i][1]\n\n if time_string.isnumeric():\n integer_timestamps = True\n break\n\n try:\n time = pd.Timestamp(time_string)\n time = time.strftime(full_time_format)\n raw_array[i][1] = time\n\n except ValueError:\n sys.stdout.write(f\"ERROR: {time_string}\"\n f\" cannot be parsed as a time value\\n\")\n raise TypeError\n\n # this will be the first thing we run into after\n # encountering an integer in the previous loop\n if integer_timestamps:\n\n # we need to find what the maximum value is in the second column\n # to decide if it represents seconds, minutes, or hours\n max_value = 0\n min_value = 1\n\n # find the max and min value until the max value wraps around to 0\n # when this happens:\n # if max is 60, we're in minutes\n # if max is 24, we're in hours\n # else, we're in seconds\n for i in range(num_row):\n number = int(raw_array[i][1])\n\n if number < min_value:\n min_value = 0\n\n if number > max_value:\n max_value = number\n else:\n if number == min_value:\n if (max_value == 60 and min_value == 1) \\\n or (max_value == 59 and min_value == 0):\n integer_minutes = True\n elif (max_value == 24 and min_value == 1) \\\n or (max_value == 23 and min_value == 0):\n integer_hours = True\n break\n\n # it's possible that we reached the end of the file\n # without looping around to 0, in this case, the max_value\n # was never reached so we assume this column represents seconds\n # it is also possible that we wrapped around at 0 and\n # just didn't meet the conditions to consider the row\n # as minutes or hours. 
We'll still use seconds in this case\n if max_value > 60:\n integer_seconds = True\n\n # basic switch case for putting each entry of\n # second column into the desired format\n if integer_seconds:\n for i in range(num_row):\n number = int(raw_array[i][1])\n raw_array[i][1] = \\\n datetime.time(number).strftime(seconds_time_format)\n elif integer_minutes:\n for i in range(num_row):\n number = int(raw_array[i][1])\n raw_array[i][1] = \\\n datetime.time(number).strftime(minutes_time_format)\n elif integer_hours:\n for i in range(num_row):\n number = int(raw_array[i][1])\n raw_array[i][1] = \\\n datetime.time(number).strftime(hours_time_format)\n\n return", "def parse_time(self):\n\n # parse time\n year = int(self.start[:4])\n month = int(self.start[5:7])\n day = int(self.start[8:10])\n hours = int(self.start[11:13])\n minutes = int(self.start[14:16])\n seconds = int(self.start[17:19])\n time = datetime.datetime(year, month, day, hours, minutes, seconds)\n\n # advance time\n time = time + datetime.timedelta(minutes=self.rain_interval)\n time = time.isoformat(\" \")\n\n # timestamp\n # elevation (m)\n evolved_elevation = (\n 'elevation_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # water depth (m)\n depth = (\n 'depth_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # sediment flux (kg/ms)\n sediment_flux = (\n 'flux_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # erosion-deposition (kg/m2s)\n erosion_deposition = (\n 'erosion_deposition_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # elevation difference (m)\n difference = (\n 'difference_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n\n return (evolved_elevation, time, depth, sediment_flux,\n erosion_deposition, difference)", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n 
nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def test_temperatures_when_data_is_not_present(self):\n\n tt = TemperatureTracker()\n result = tt.temperatures()\n self.assertEqual(result, [])", "def _interpolate_meteorological_data(dset, data, rundate):\n rundate = datetime(rundate.year, rundate.month, rundate.day)\n for field, station in [(f, f[4:]) for f in data.keys() if f.startswith(\"met_\")]:\n log.debug(f\"Meteorological data available for station {station}\")\n\n met_time = data[field].pop(\"met_time\")\n flat_list = [item for sublist in met_time for item in sublist]\n met_time_float = np.array([(flat_list[i] - rundate).total_seconds() for i in range(0, len(flat_list))])\n met_time_unique, met_index = np.unique(met_time_float, return_index=True)\n\n diff = len(met_time_float) - len(met_time_unique)\n if diff > 0:\n log.dev(f\"Removed duplicate met data for station {station}\")\n log.dev(\"Do this for the actual obs data also!\")\n if len(met_time_unique) == 1:\n for met_type in data[field].keys():\n data[field][met_type] = np.repeat(data[field][met_type][0], dset.num_obs)\n continue\n\n # Extrapolation one month before/after\n # (this is overkill, most of these values will be removed later when taking the diagonal)\n min_time = min(met_time_unique) - 31 * 86400\n max_time = max(met_time_unique) + 31 * 86400\n met_time_unique = np.hstack((np.array(min_time), met_time_unique, np.array(max_time)))\n\n for met_type in data[field].keys():\n met_data_array = data[field][met_type]\n flat_list = [item for sublist in met_data_array for item in sublist]\n met_data_array = np.array([flat_list[i] for i in met_index])\n met_data_array = np.hstack((met_data_array[0], met_data_array, met_data_array[-1]))\n data[field][met_type] = interpolation.interpolate(\n met_time_unique, met_data_array, dset.obs_time, kind=\"cubic\"\n )\n\n return data", "def _setupWeather(self, w, config):\n wnames = ('cloud', 'seeing')\n if w not in wnames:\n raise Exception('w should be one of %s' %(wnames))\n filename = config['%s_datafile' %(w)]\n file = open(filename, 'r')\n # Also assume flat file contains only date / value in a space or tab separated file. \n self.dates[w] = []\n self.weather[w] = []\n # Read the data file.\n print '# Reading weather data file %s' %(filename)\n for line in file:\n if line.startswith('#') | line.startswith('!'):\n continue\n self.dates[w].append(line.split()[0])\n self.weather[w].append(line.split()[1])\n file.close()\n self.dates[w] = numpy.array(self.dates[w], float)\n self.weather[w] = numpy.array(self.weather[w], float)\n # Check the total amount of data (mostly for user awareness):\n print '# Read %d weather values from %s file. ' %(len(self.weather[w]), filename)\n # Check that weather data is monotonically increasing in time. 
\n if not(numpy.all(numpy.diff(self.dates[w]))):\n order = self.dates[w].argsort()\n self.weather[w] = self.weather[w][order]\n self.dates[w] = self.dates[w][order]\n # Get the total length of time included in this (seeing/cloud) file,\n # so that we can determine a wrap-around date if we need that.\n self.maxtime[w] = self.dates[w].max()\n return", "def scale_time_to(recs, unit):\n\n for r in recs:\n if unit == 'd':\n r.t = [t / 3600 / 24 for t in r.time]\n elif unit == 'hours':\n r.t = [t / 3600 for t in r.time]\n elif unit == 'min':\n r.t = [t / 60 for t in r.time]\n elif unit in ('s', 'sec'):\n r.t = r.time\n else:\n Exception('Wrong time unit')\n\n Records.time_unit = unit\n Records.time_label = 'Time (' + unit + ')'", "def jsonData_to_dataset_in_time_format(data):\n\n the_cols = ['x', 'y', 'z', 'timestamp', 'label', 'hand', 'annotator']\n the_data = []\n\n for value in data:\n the_raws = []\n the_indxs = []\n idx = 0\n cur_time = datetime.datetime.strptime(\n value['createdAt']['$date'], '%Y-%m-%dT%H:%M:%S.%fZ')\n for raw in value['raws']:\n micro = int(raw['timestamp'])/1000\n raw_time = cur_time + datetime.timedelta(microseconds=micro)\n the_raws.append([raw['x'], raw['y'], raw['z'], int(\n raw_time), value['label'], value['hand'], value['annotator']])\n cur_time = raw_time\n the_indxs.append(idx)\n idx += 1\n the_data.append(pd.DataFrame(the_raws, the_indxs, the_cols))\n return the_data", "def temp_series(smhi_data):\n consumable_data = {\n \"station\": smhi_data[\"station\"][\"name\"],\n \"temp\": [],\n \"from\": smhi_data[\"value\"][0][\"date\"],\n \"to\": smhi_data[\"value\"][-1][\"date\"]\n }\n for temp_post in smhi_data[\"value\"]:\n consumable_data[\"temp\"].append(float(temp_post[\"value\"]))\n return consumable_data", "def _parse_timestamps(self):\n if self.detectors is None and not isinstance(self.timestamps, dict):\n raise ValueError(\n \"Detector names must be given either as a key in\"\n \" a `timestamps` dict or explicitly via `detectors`.\"\n )\n\n if isinstance(self.timestamps, str):\n numTS = len(self.timestamps.split(\",\"))\n numDets = len(self.detectors.split(\",\"))\n if not numTS == numDets:\n raise ValueError(\n \"Inconsistent length of comma-separated\"\n f\" `timestamps` and `detectors`: {numTS}!={numDets}\"\n )\n return\n\n if isinstance(self.timestamps, dict):\n # Each key should correspond to `detectors` if given;\n # otherwise, construct detectors from the given keys.\n ifos = list(self.timestamps.keys())\n input_timestamps = self.timestamps.values()\n\n if self.detectors is not None:\n ifos_in_detectors = self.detectors.split(\",\")\n if np.setdiff1d(ifos, ifos_in_detectors).size:\n raise ValueError(\n f\"Detector names in timestamps dictionary ({ifos}) \"\n f\"are inconsistent with detector names given via keyword ({ifos_in_detectors}).\"\n )\n else:\n self.detectors = \",\".join(ifos)\n else:\n # Otherwise, assume it's a single list of timestamps,\n # and replicate it for each detector.\n ifos = self.detectors.split(\",\")\n input_timestamps = [self.timestamps for i in ifos]\n\n # If this point was reached, it means we should create timestamps files.\n timestamp_files = []\n for ind, ts in enumerate(input_timestamps):\n output_file = os.path.join(\n self.outdir, f\"{self.label}_timestamps_{ifos[ind]}.csv\"\n )\n np.savetxt(output_file, ts.reshape(-1, 1), fmt=\"%d\")\n timestamp_files.append(output_file)\n self.timestamps = \",\".join(timestamp_files)", "def get_weather_report(takeoff,weather):\n # HINT: Looping through the dictionary is VERY slow 
because it is so large\n # You should convert the takeoff time to an ISO string and search for that first.\n # Only loop through the dictionary as a back-up if that fails.\n \n # Search for time in dictionary\n # As fall back, find the closest time before takeoff\n \n from dateutil.parser import parse\n \n result = []\n takeofftime = takeoff.isoformat()\n \n if takeofftime in weather.keys():\n result = weather[takeofftime]\n \n elif takeofftime not in weather.keys():\n weatherlist = list(weather.keys())\n count = len(weatherlist)\n for m in weatherlist[::-1]:\n if m < takeofftime:\n result = weather[m]\n \n else: \n result = None\n \n \n return result", "def __init__(self, measurement, tags, fields, time_stamp):\n self.measurement = measurement\n self.tags = tags\n self.fields = fields\n self.time = time_stamp", "def parseApi(data):\n influxOut = []\n\n for output in data:\n for values in output['data']:\n for k, v in values.items():\n ksec = int(k) / 1000\n valTime = datetime.datetime.fromtimestamp(int(ksec))\n val = v\n cid = output['cid']\n sid = output['sid']\n\n influxOut.append(\n {\"measurement\": \"kWm\",\n \"tags\": {\n \"cid\": cid,\n \"sid\": sid,\n },\n \"time\": valTime.isoformat(),\n \"fields\": {\n \"power\": val,\n },\n }\n )\n\n return(influxOut)", "def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m", "def _get_setup_from_timestamps(self):\n self._parse_timestamps()\n IFOs = self.detectors.split(\",\")\n # at this point, it's definitely a comma-separated string\n tsfiles = self.timestamps.split(\",\")\n if len(IFOs) != len(tsfiles):\n raise ValueError(\n f\"Length of detectors=='{self.detectors}'\"\n f\" does not match that of timestamps=='{self.timestamps}'\"\n f\" ({len(IFOs)}!={len(tsfiles)})\"\n )\n tstart = []\n tend = []\n self.sftfilenames = [] # This refers to the MFD output!\n for X, IFO in enumerate(IFOs):\n tsX = np.genfromtxt(tsfiles[X], comments=\"%\")\n if tsX.ndim > 1:\n logger.warning(\n f\"Timestamps file {tsfiles[X]} has more than 1 column,\"\n \" we will ignore the rest.\"\n )\n tsX = tsX[:, 0]\n if not tsX[0].is_integer() or not tsX[-1].is_integer():\n logger.warning(\n \"Detected non-integer timestamps in timestamp file.\"\n \" We will floor start and end times to the nearest integer\"\n \" for the SFT name,\"\n \" and let lalpulsar_Makefakedata_v5 handle the rest.\"\n )\n\n this_start_time = int(tsX[0])\n this_end_time = int(tsX[-1]) + self.Tsft\n tstart.append(this_start_time)\n tend.append(this_end_time)\n self.sftfilenames.append(\n utils.get_official_sft_filename(\n IFO,\n len(tsX),\n self.Tsft,\n this_start_time,\n this_end_time - this_start_time,\n self.label,\n )\n )\n self.tstart = 
min(tstart)\n self.duration = max(tend) - self.tstart", "def converting_timestamps(array):\r\n row = 0\r\n data = array\r\n month_dict = {\"Jan\": \"01\", \"Feb\": \"02\", \"Mar\": \"03\", \"Apr\": \"04\",\r\n \"May\": \"05\", \"Jun\": \"06\", \"Jul\": \"07\", \"Aug\": \"08\",\r\n \"Sept\": \"09\", \"Oct\": \"10\", \"Nov\": \"11\", \"Dec\": \"12\"}\r\n for i in array:\r\n if len(data[0][0]) <= 1:\r\n string_lst = i.split()\r\n data[row] = np.array(\"{}-{}-{} {}\".format(string_lst[5], month_dict[string_lst[1]],\r\n string_lst[2], string_lst[3]))\r\n row += 1\r\n else:\r\n string_lst = i[0].split()\r\n data[row][0] = np.array(\"{}-{}-{} {}\".format(string_lst[5],\r\n month_dict[string_lst[1]], string_lst[2], string_lst[3]))\r\n row += 1\r\n return data", "def OPCtimetransformOld(data, to):\n outtimes = []\n \n times = {\n 'ms':[],\n 'SS':[],\n 'MM':[],\n 'HH':[]\n }\n for i in range(0, len(data)):\n item = data[i]\n try: \n times['HH'].append(int(item[0:2]))\n times['MM'].append(int(item[2:4]))\n times['SS'].append(int(item[4:6]))\n times['ms'].append(int(item[7:9]))\n except ValueError:\n # strange value 2319010.00 in 201129 file...\n olditem = item\n newitem = item[:4] + item[4+1:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n try:\n times['HH'].append(int(newitem[0:2]))\n times['MM'].append(int(newitem[2:4]))\n times['SS'].append(int(newitem[4:6]))\n times['ms'].append(int(newitem[7:9]))\n except ValueError:\n print(newitem)\n\n # OPC times go up to 60 minutes. This is corrected by moving one minute\n times['MM'] = [max(0,x-1) for x in times['MM']]\n times['SS'] = [max(0,x-1) for x in times['SS']]\n\n for i in range(0, len(data)):\n md = dt.datetime(1900,1,1,times['HH'][i], times['MM'][i], times['SS'][i]) \n outtimes.append( dt.datetime.strftime(md, to))\n\n return outtimes", "def __find_nearest_weathers(\n self, timestamp: datetime, weather_list: list,\n ) -> dict:\n if (\n timestamp.tzinfo is None\n ): # If timestamp is naive (tzinfo = None),\n # make it so that it is the same as weather_list timestamp.\n timestamp = timestamp.replace(tzinfo=weather_list[0].date.tzinfo)\n\n beforeWeathers = list(\n filter(\n lambda x: timestamp >= x.date - timedelta(minutes=1),\n weather_list,\n ),\n )\n afterWeathers = list(\n filter(lambda x: timestamp < x.date, weather_list),\n )\n before = None\n beforeSeconds = 999999999999999999999999999\n after = None\n afterSeconds = 999999999999999999999999999\n\n for bw in beforeWeathers:\n if timestamp > bw.date:\n t = timestamp - bw.date\n else:\n t = bw.date - timestamp\n if beforeSeconds > t.seconds:\n before = bw\n beforeSeconds = t.seconds\n for aw in afterWeathers:\n if timestamp > aw.date:\n t = timestamp - aw.date\n else:\n t = aw.date - timestamp\n if afterSeconds > t.seconds:\n after = aw\n afterSeconds = t.seconds\n return {\n 'before': {'weather': before, 'seconds': beforeSeconds},\n 'after': {'weather': after, 'seconds': afterSeconds},\n }", "def entries_from_goes_ts_file2(file, default_waveunit=None):\n\n headers = fits.get_header(file)\n if isinstance(file, (str, six.text_type)):\n filename = file\n else:\n filename = getattr(file, 'name', None)\n\n statinfo = os.stat(file)\n #print('a header')\n entry = DatabaseEntry(path=filename)\n size = statinfo.st_size\n\n # Add/tweak start/end entries for GOES\n if headers[0].get('TELESCOP','') != '':\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 
'GOES 6' instead 'X-ray Detector'\n entry.instrument = headers[0]['TELESCOP']\n if (headers[0].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[0]['DATE-OBS'])\n elif (headers[1].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[1]['DATE-OBS'])\n\n if (headers[0].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[0]['DATE-END'])\n elif (headers[1].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[1]['DATE-END'])\n else:\n end_time = start_time + timedelta(days=1,seconds=-1)\n\n # Add these to the entry\n observation_time_start = start_time\n observation_time_end = end_time\n\n wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n metadata = MetaDict(headers[1])\n #entry.tags = sunpy.database.attrs.Tag('raw')\n\n entry = DatabaseEntry(observation_time_start=start_time,\n observation_time_end = end_time,\n instrument='EIT',\n wavemin=wavemin,\n wavemax=wavemax,\n metadata=metadata,\n size=size)\n\n return entry", "def create_timestructured(self, good, quantity):\n length = len(self._haves[good].time_structure)\n for i in range(length):\n qty = quantity[i] if type(quantity) == list else quantity / length\n self._haves[good].time_structure[i] += qty", "def test_str_time_2(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"time_2\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xFB,\n 0x29,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -405995.52)\n self.assertEqual(sensor.unit_of_measurement(), \"ms\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_parse_weather_data_no_wind(self):\n # Get a copy of the data.\n data = copy.deepcopy(self.weather_simple)\n\n # Remove average wind speed\n del data['data'][0]['AvgWindSpeed']\n\n # We should not get a ValueError.\n # noinspection PyBroadException\n try:\n timeseries.parse_weather(data)\n except Exception:\n m = ('An exception was raised when parsing simple weather without '\n 'AvgWindSpeed')\n self.fail(m)", "def power_timeline():\n\n return [\n {\n \"timestamp\": \"2021-09-14T12:37:37.168817\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:37.669237\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.170142\",\n \"sensor\": \"formula_group\",\n \"target\": 
\"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.670338\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.171321\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.671572\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.172503\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.672693\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.173552\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.673815\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.174560\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.674690\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.175441\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.675743\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.176551\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.677307\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.178049\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.678310\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.179120\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.679308\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.180223\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.680468\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.181316\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.681683\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.182522\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.682731\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.183680\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.683812\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.184792\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.685027\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n 
\"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.185709\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.686065\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.186929\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.687190\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.188031\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.688674\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.189489\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.690299\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:56.191124\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n\n ]", "def __init__(self, timestamps, signal):\n self.timestamps = timestamps\n self.signal = signal\n \n self.timestamps = self._format_timestamps() \n return", "def _clean_times(self):\n if hasattr(self, 'train_times'):\n self.times = self.train_times\n if hasattr(self, 'train_times_'):\n self.times_ = self.train_times_\n for attr in ['test_times', 'train_times',\n 'test_times_', 'train_times_']:\n if hasattr(self, attr):\n delattr(self, attr)\n if hasattr(self, 'y_pred_'):\n self.y_pred_ = [y_pred[0] for y_pred in self.y_pred_]\n if hasattr(self, 'scores_'):\n self.scores_ = [score[0] for score in self.scores_]", "def add_missing_weather_data(df):\n\n full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H')\n sites = list(set(df.site_id))\n full_data_site_range = pd.DataFrame(itertools.product(sites, full_date_range),\n columns=['site_id', 'timestamp'])\n df_all_dates = full_data_site_range.merge(df, on=['site_id', 'timestamp'], how='left')\n df_all_dates = df_all_dates.groupby('site_id').apply(lambda group: group.interpolate(limit_direction='both'))\n\n return df_all_dates", "def converttime(time, currentformat, newformat):\n\n # Define conversion dictionary\n conversions = {\n \"milliseconds\": {\n \"milliseconds\": \"time\",\n \"seconds\": \"time / 1000\",\n \"minutes\": \"time / 1000 / 60\",\n \"hours\": \"time / 1000 / 60 / 60\",\n \"days\": \"time / 1000 / 60 / 60 / 24\",\n \"weeks\": \"time / 1000 / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 1000 / 60 / 60 / 24 / 14\",\n \"years\": \"time / 1000 / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 1000 / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 1000 / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 1000 / 60 / 60 / 24 / 365 / 1000\"\n },\n \"seconds\": {\n \"milliseconds\": \"time * 1000\",\n \"seconds\": \"time\",\n \"minutes\": \"time / 60\",\n \"hours\": \"time / 60 / 60\",\n \"days\": \"time / 60 / 60 / 24\",\n \"weeks\": \"time / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 60 / 24 / 14\",\n \"years\": \"time / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 60 / 24 / 365 / 1000\"\n },\n \"minutes\": {\n \"milliseconds\": \"time * 60 * 1000\",\n \"seconds\": \"time * 60\",\n 
\"minutes\": \"time\",\n \"hours\": \"time / 60\",\n \"days\": \"time / 60 / 24\",\n \"weeks\": \"time / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 24 / 14\",\n \"years\": \"time / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 24 / 365 / 1000\"\n },\n \"hours\": {\n \"milliseconds\": \"time * 60 * 60 * 1000\",\n \"seconds\": \"time * 60 * 60\",\n \"minutes\": \"time * 60\",\n \"hours\": \"time\",\n \"days\": \"time / 24\",\n \"weeks\": \"time / 24 / 7\",\n \"fortnights\": \"time / 24 / 14\",\n \"years\": \"time / 24 / 365\",\n \"decades\": \"time / 24 / 365 / 10\",\n \"centuries\": \"time / 24 / 365 / 100\",\n \"millenniums\": \"time / 24 / 365 / 1000\"\n },\n \"days\": {\n \"milliseconds\": \"time * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 24 * 60 * 60\",\n \"minutes\": \"time * 24 * 60\",\n \"hours\": \"time * 24\",\n \"days\": \"time\",\n \"weeks\": \"time / 7\",\n \"fortnights\": \"time / 14\",\n \"years\": \"time / 365\",\n \"decades\": \"time / 365 / 10\",\n \"centuries\": \"time / 365 / 100\",\n \"millenniums\": \"time / 365 / 1000\"\n },\n \"weeks\": {\n \"milliseconds\": \"time * 7 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 7 * 24 * 60 * 60\",\n \"minutes\": \"time * 7 * 24 * 60\",\n \"hours\": \"time * 7 * 24\",\n \"days\": \"time * 7\",\n \"weeks\": \"time\",\n \"fortnights\": \"time / 2\",\n \"years\": \"time / 52\",\n \"decades\": \"time / 52 / 10\",\n \"centuries\": \"time / 52 / 100\",\n \"millenniums\": \"time / 52 / 1000\"\n },\n \"fortnights\": {\n \"milliseconds\": \"time * 14 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 14 * 24 * 60 * 60\",\n \"minutes\": \"time * 14 * 24 * 60\",\n \"hours\": \"time * 14 * 24\",\n \"days\": \"time * 14\",\n \"weeks\": \"time * 2\",\n \"fortnights\": \"time\",\n \"years\": \"time / 26\",\n \"decades\": \"time / 26 / 10\",\n \"centuries\": \"time / 26 / 100\",\n \"millenniums\": \"time / 26 / 1000\"\n },\n \"years\": {\n \"milliseconds\": \"time * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 256 * 24 * 60\",\n \"hours\": \"time * 256 * 24\",\n \"days\": \"time * 256\",\n \"weeks\": \"time * 52\",\n \"fortnights\": \"time * 26\",\n \"years\": \"time\",\n \"decades\": \"time / 10\",\n \"centuries\": \"time / 100\",\n \"millenniums\": \"time / 1000\"\n },\n \"decades\": {\n \"milliseconds\": \"time * 10 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 10 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 10 * 256 * 24 * 60\",\n \"hours\": \"time * 10 * 256 * 24\",\n \"days\": \"time * 10 * 256\",\n \"weeks\": \"time * 10 * 52\",\n \"fortnights\": \"time * 10 * 26\",\n \"years\": \"time * 10\",\n \"decades\": \"time\",\n \"centuries\": \"time / 10\",\n \"millenniums\": \"time / 100\"\n },\n \"centuries\": {\n \"milliseconds\": \"time * 100 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 100 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 100 * 256 * 24 * 60\",\n \"hours\": \"time * 100 * 256 * 24\",\n \"days\": \"time * 100 * 256\",\n \"weeks\": \"time * 100 * 52\",\n \"fortnights\": \"time * 100 * 26\",\n \"years\": \"time * 100\",\n \"decades\": \"time * 10\",\n \"centuries\": \"time\",\n \"millenniums\": \"time / 10\"\n },\n \"millenniums\": {\n \"milliseconds\": \"time * 1000 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 1000 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 1000 * 256 * 24 * 60\",\n \"hours\": \"time * 1000 * 256 * 24\",\n \"days\": 
\"time * 1000 * 256\",\n \"weeks\": \"time * 1000 * 52\",\n \"fortnights\": \"time * 1000 * 26\",\n \"years\": \"time * 1000\",\n \"decades\": \"time * 100\",\n \"centuries\": \"time * 10\",\n \"millenniums\": \"time\"\n }\n }\n\n # Return evaluated value\n return eval(conversions[currentformat][newformat])", "def data_preprocessing(dataset):\r\n df = pd.read_csv(dataset)\r\n df.head()\r\n df.describe()\r\n df.isnull().sum()\r\n df= df.drop(['instant'], axis=1)\r\n df['dteday'] = pd.to_datetime(df['dteday'].apply(str) + ' ' + df['hr'].apply(str) + ':00:00')\r\n return df", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def make_times(night, runs, observatory, times, full, instrument, okwrite):\n\n # use this to check times are vaguely right. time of runs\n # must lie between 06.00 local time on date corresponding to\n # start of night date and 1.5 days later. Has picked up a\n # few erroneously dated nights on the TNT.\n mjd_ref = Time(night).mjd - observatory.lon.degree/360 + 0.25\n\n tdata = {}\n with open(times if okwrite else os.devnull,'w') as tout:\n for run in runs:\n if full:\n print(f'Analysing times for run {run}')\n dfile = os.path.join(night, run)\n try:\n ntotal = 0\n if instrument == 'HiPERCAM':\n rtime = hcam.hcam.Rtime(dfile)\n else:\n rtime = hcam.ucam.Rtime(dfile)\n\n # Find first good time, has to roughly match the start\n # date of the night because some times can just be\n # junk\n not_alerted = True\n for n, tdat in enumerate(rtime):\n if instrument == 'HiPERCAM':\n time, tinfo, tflag = tdat\n expose = 1000000\n for tmid,texp,tiflag in tinfo:\n expose = min(round(texp,3),expose)\n else:\n time, tinfo = tdat[:2]\n tflag = time.good\n expose = round(time.expose,3)\n\n if instrument == 'HiPERCAM' or tflag:\n mjd_start = time.mjd\n tdelta = mjd_start-mjd_ref\n if tdelta > 0 and tdelta < 1.5:\n ts = Time(mjd_start, format=\"mjd\", precision=2)\n ut_start = ts.hms_custom\n n_start = n+1\n if expose >= 0 and expose < 2000:\n break\n elif not_alerted and (tdelta < 0 or tdelta > 1.5):\n # maximum one warning per run\n not_alerted = False\n print(f' Bad time: tdelta = {tdelta} < 0 or > 1.5 on time {n} of {dfile}')\n else:\n ntotal = 0\n raise hcam.HipercamError(f'No good times found in {dfile}')\n\n # Find last good time. First we just go for times near the\n # end of the run. 
Failing that, we try again from the start,\n # to account for runs with time stamp issues.\n if instrument == 'HiPERCAM':\n nback = 4\n elif rtime.header['MODE'] == 'DRIFT':\n # ultracam or hipercam\n win = rtime.win[0]\n nyu = win.ny*rtime.ybin\n nback = int((1033/nyu + 1) / 2) + 3\n elif rtime.header['MODE'] == 'UDRIFT':\n # ultraspec\n win = rtime.win[0]\n nyu = win.ny*rtime.ybin\n nback = int((1037/nyu + 1) / 2) + 3\n else:\n # non drift mode\n nback = 4\n\n if instrument == 'HiPERCAM':\n ntotal = rtime.ntotal()\n else:\n nbytes = os.stat(dfile + '.dat').st_size\n ntotal = nbytes // rtime.framesize\n\n if instrument != 'HiPERCAM' and ntotal > 20000:\n # this is a risk-reducing strategy in case the end\n # of a long ultracam or ultraspec run is\n # corrupt. Better to look at more than the\n # necessary number of frames if it prevents us\n # from having to wind through the whole lot.\n nback = max(nback, 500)\n\n # next statement basically resets the frame\n # we are on\n nreset = max(1, ntotal - nback)\n rtime.set(nreset)\n\n flast = False\n for n, tdat in enumerate(rtime):\n if instrument == 'HiPERCAM':\n time, tinfo, tflag = tdat\n nexpose = 1000000\n for tmid,texp,tiflag in tinfo:\n nexpose = min(round(texp,3),expose)\n else:\n time, tinfo = tdat[:2]\n tflag = time.good\n nexpose = round(time.expose,3)\n\n if instrument == 'HiPERCAM' or tflag:\n mjd = time.mjd\n if mjd >= mjd_start and mjd < mjd_start + 0.4:\n mjd_end = mjd\n ts = Time(mjd_end, format=\"mjd\", precision=2)\n ut_end = ts.hms_custom\n n_end = nreset + n\n if nexpose < 2000:\n expose = max(expose, nexpose)\n flast = True\n\n if not flast:\n # no good time found near end. There must be\n # one or we wouldn't get to this point, so\n # grind it out the hard way by going through\n # the whole run, which can be slow.\n rtime.set()\n for n, tdat in enumerate(rtime):\n if instrument == 'HiPERCAM':\n time, tinfo, tflag = tdat\n nexpose = 1000000\n for tmid,texp,tiflag in tinfo:\n nexpose = min(round(texp,3),expose)\n else:\n time, tinfo = tdat[:2]\n tflag = time.good\n nexpose = round(time.expose,3)\n\n if tflag:\n mjd = time.mjd\n if mjd >= mjd_start and mjd < mjd_start + 0.4:\n mjd_end = mjd\n ts = Time(mjd_end, format=\"mjd\", precision=2)\n ut_end = ts.hms_custom\n n_end = n + 1\n if nexpose < 2000:\n expose = max(expose, nexpose)\n\n nok = n_end-n_start+1\n if n_end > n_start:\n cadence = round(86400*(mjd_end-mjd_start)/(n_end-n_start),3)\n tdata[run] = [ut_start,mjd_start,ut_end,mjd_end,cadence,expose,nok,ntotal]\n else:\n cadence = 'UNDEF'\n tdata[run] = [ut_start,mjd_start,ut_end,mjd_end,'',expose,nok,ntotal]\n tout.write(f'{run} {ut_start} {mjd_start} {ut_end} {mjd_end} {cadence} {expose} {nok} {ntotal}\\n')\n\n except hcam.ucam.PowerOnOffError:\n # Power on/off\n tdata[run] = ['power-on-off',]\n tout.write(f'{run} power-on-off\\n')\n if full: print(f'{run} was a power-on or -off')\n\n except hcam.HipercamError:\n # No good times\n tdata[run] = ['','','','','','',0,ntotal]\n tout.write(f'{run} UNDEF UNDEF UNDEF UNDEF UNDEF UNDEF 0 {ntotal}\\n')\n if full:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1)\n traceback.print_exc()\n print(f'No good times found for {run}; ntotal = {ntotal}')\n\n except:\n # some other failure\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1)\n traceback.print_exc()\n print(\"Problem on run = \", dfile)\n\n # Load of undefined\n tdata[run] = 8*['']\n tout.write(f'{run} {\" 
\".join(8*[\"UNDEF\"])}\\n')\n\n if okwrite:\n print('Written timing data to',times)\n\n return tdata", "def statistics_transform(row):\n row[0] = \"%s 00:00:00\" % row[0] # BQ TIMESTAMP format\n return row", "def initial_meter_statistics(apps, schema_editor):\n MeterStatistics = apps.get_model('dsmr_datalogger', 'MeterStatistics')\n DsmrReading = apps.get_model('dsmr_datalogger', 'DsmrReading')\n\n # We can't (and shouldn't) use Solo here.\n stats = MeterStatistics.objects.create() # All fields are NULL in database, by design.\n assert MeterStatistics.objects.exists()\n\n try:\n # Just use the latest DSMR reading, if any.\n latest_reading = DsmrReading.objects.all().order_by('-timestamp')[0]\n except IndexError:\n return\n\n stats.electricity_tariff = latest_reading.electricity_tariff\n stats.power_failure_count = latest_reading.power_failure_count\n stats.long_power_failure_count = latest_reading.long_power_failure_count\n stats.voltage_sag_count_l1 = latest_reading.voltage_sag_count_l1\n stats.voltage_sag_count_l2 = latest_reading.voltage_sag_count_l2\n stats.voltage_sag_count_l3 = latest_reading.voltage_sag_count_l3\n stats.voltage_swell_count_l1 = latest_reading.voltage_swell_count_l1\n stats.voltage_swell_count_l2 = latest_reading.voltage_swell_count_l2\n stats.voltage_swell_count_l3 = latest_reading.voltage_swell_count_l3\n stats.save()", "def test_temperatures(get_touchmat):\n touchmat = get_touchmat\n\n temperatures = touchmat.temperatures()\n info = touchmat.info()\n check_system_types.check_TemperatureInfoList(temperatures, [info])", "def __init__(self):\n\n self.name = \"\"\n self.address = \"\"\n self.coordinates = []\n self.weatherHours = []\n self.currentHour = 0", "def organize_data(path_dir, accelerometer_file, accelerometer_data):\n\n accelerometer_df = pd.read_csv(os.path.join(path_dir, accelerometer_file), usecols=['UTC time', 'x', 'y', 'z'])\n\n x_list = accelerometer_df['x']\n y_list = accelerometer_df['y']\n z_list = accelerometer_df['z']\n UTC_times_list = accelerometer_df['UTC time']\n\n x_y_z_list_for_hour = [] # will contain 60*60 values, that every value is [x,y,z]\n\n curr_line_index = 0\n curr_date_time = get_date_time_from_UTC_time(UTC_times_list[curr_line_index])\n for i in range(60):\n for j in range(60):\n if (curr_date_time.minute != i or curr_date_time.second != j) or curr_line_index + 1 == len(UTC_times_list): # the curr time is more or little then the wanted time, or we finished all the lines in the file --> there is a need to fulfill the values with 0,0,0\n continue\n else:\n x_y_z_list_for_hour.append([x_list[curr_line_index], y_list[curr_line_index], z_list[curr_line_index]])\n while curr_date_time.minute == i and curr_date_time.second <= j and curr_line_index + 1 != len(UTC_times_list):\n curr_line_index += 1\n curr_date_time = get_date_time_from_UTC_time(UTC_times_list[curr_line_index])\n date = get_date_from_file_name(accelerometer_file)\n hour = curr_date_time.hour\n if date not in accelerometer_data.data_dic:\n accelerometer_data.data_dic[date] = {}\n accelerometer_data.data_dic[date][hour] = x_y_z_list_for_hour", "def _convert_timestamp_2_periodic_time(self, timestamp):\n \n l = \"\"\n\n # daily periodic\n theta = self.two_pi_by_one_day_second * (int(timestamp[0:-3]) % self.one_day_second)\n #x = 1 + np.cos(theta)\n #y = 1 + np.sin(theta)\n x = np.cos(theta)\n y = np.sin(theta)\n l += str(x) + \",\" + str(y)\n l += \",\"\n\n # weekly periodic\n theta = self.two_pi_by_seven_days_second * (int(timestamp[0:-3]) % self.seven_days_second)\n # no 
need plus one?\n #x = 1 + np.cos(theta)\n #y = 1 + np.sin(theta)\n x = np.cos(theta)\n y = np.sin(theta)\n l += str(x) + \",\" + str(y)\n\n return l", "async def get_temperatures(self, **kwargs: Any) -> Dict[str, float]:\n ...", "def __process__(self, data: dict, metadata: dict, format: str):\n result = []\n metadata = MetadataModel(\n symbol=metadata[\"2. Symbol\"],\n last_refreshed=datetime.strptime(metadata[\"3. Last Refreshed\"], format),\n timezone=metadata[\"6. Time Zone\"] if \"6. Time Zone\" in metadata else metadata[\"5. Time Zone\"]\n )\n\n for key in data:\n result.append(TimeSerieModel(\n timestamp=datetime.strptime(key, format),\n open=float(data[key][\"1. open\"]),\n high=float(data[key][\"2. high\"]),\n low=float(data[key][\"3. low\"]),\n close=float(data[key][\"4. close\"]),\n volume=int(data[key][\"5. volume\"])\n ))\n\n ts = TimeSeriesModel(metadata=metadata, series_data=result)\n\n if self.datatype == \"class\":\n return ts\n elif self.datatype == \"pandas\":\n try:\n import pandas as pd\n return pd.DataFrame(ts.get_list())\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please make sure pandas is installed.\")", "def convert_all_timestamps(results: List[ResponseObject]) -> List[ResponseObject]:\n results = [convert_generic_timestamps(result) for result in results]\n results = [convert_observation_timestamps(result) for result in results]\n return results", "def __init__(self):\n self.timeMap = defaultdict(list)", "def __init__(self):\n self.timeMap = defaultdict(list)", "def clean_data(X):\n X_cleaned = []\n for i in X:\n X_cleaned.append([i[0], i[1], i[2].timestamp(), i[3], i[4].timestamp(), i[5]])\n return X_cleaned", "def read(self, *args, **kwargs):\n\n ts = super(SMOSTs, self).read(*args, **kwargs)\n\n if self.drop_missing:\n ts = ts.dropna(how='all')\n\n for col in ts: # convert to ints, if possible\n if (not np.isnan(ts[col]).any()) and \\\n (np.all(np.mod(ts[col].values, 1) == 0.)):\n ts[col] = ts[col].astype(int)\n\n if self.index_add_time:\n ts = self._to_datetime(ts)\n\n return ts", "def aquarius_timestamp(arg):\n\n meta = readMetadata(arg)\n\n sat_name = meta['Sensor'].lower()\n stime = meta['Start Time'][0:13]\n etime = meta['End Time'][0:13]\n\n return (stime,\n etime,\n sat_name)", "def entries_from_goes_ts_files(*files, default_waveunit=None, source=None):\n\n\n \"\"\"\n ts_goes = ts.TimeSeries(file)\n statinfo = os.stat(file)\n entry = DatabaseEntry(path=file)\n entry.size = statinfo.st_size\n\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'\n entry.instrument = ts_goes.meta.get('TELESCOP').values()\n entry.instrument = ts_goes.meta.get('TELESCOP').values()\n\n entry.wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n entry.wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n #\n entry.observation_time_start = ts_goes.meta.get('date-beg').values()[0]\n entry.observation_time_end = ts_goes.meta.get('date-end').values()[0]\n\n entry.metadata = ts_goes.meta.metadata[0][2]\n\n #entry.tags = [ sunpy.database.attrs.Tag('raw') ]\n \"\"\"\n\n\n for file in files:\n headers = fits.get_header(file)\n if isinstance(file, (str, six.text_type)):\n filename = file\n else:\n filename = getattr(file, 'name', None)\n statinfo = os.stat(file)\n #print('a header')\n entry = DatabaseEntry(path=filename)\n entry.size = statinfo.st_size\n\n # Add/tweak start/end entries for GOES\n if headers[0].get('TELESCOP','') != '':\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 
'GOES 6' instead 'X-ray Detector'\n entry.instrument = headers[0]['TELESCOP']\n elif headers[1].get('TELESCOP','') != '':\n entry.instrument = headers[1]['TELESCOP']\n if (headers[0].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[0]['DATE-OBS'])\n elif (headers[1].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[1]['DATE-OBS'])\n\n if (headers[0].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[0]['DATE-END'])\n elif (headers[1].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[1]['DATE-END'])\n else:\n end_time = start_time + timedelta(days=1,seconds=-1)\n\n # Add these to the entry\n entry.observation_time_start = start_time\n entry.observation_time_end = end_time\n\n entry.wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n entry.wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n if source:\n entry.source = source\n\n entry.metadata = MetaDict(headers[1])\n #entry.tags = sunpy.database.attrs.Tag('raw')\n\n #entry = DatabaseEntry(instrument='EIT', wavemin=25.0)\n\n #return entry\n yield entry", "def adapt_timestamp(data):\n return str(time.mktime(data.timetuple()))" ]
[ "0.5928694", "0.5771849", "0.57145715", "0.56722647", "0.5632749", "0.55614084", "0.55107826", "0.55105793", "0.5509269", "0.55014396", "0.5444029", "0.5409909", "0.5388422", "0.5376563", "0.5362756", "0.535319", "0.5340124", "0.5325145", "0.53104377", "0.5308769", "0.52715063", "0.5247465", "0.5241853", "0.5221392", "0.52180904", "0.5204549", "0.52002156", "0.51882434", "0.5155208", "0.5154132", "0.5142424", "0.51381046", "0.5133876", "0.51297134", "0.5128587", "0.5119893", "0.51091", "0.51019204", "0.51013446", "0.50994027", "0.50975305", "0.5095365", "0.50948256", "0.5090419", "0.5082667", "0.5070057", "0.5062461", "0.5044873", "0.50441796", "0.504008", "0.5038316", "0.5035068", "0.5028656", "0.502466", "0.50220853", "0.50189775", "0.5011199", "0.5010547", "0.49861947", "0.49678665", "0.49565807", "0.49546015", "0.49542534", "0.4953629", "0.49530843", "0.49522427", "0.49511263", "0.4948617", "0.49367377", "0.49353698", "0.49324086", "0.49308932", "0.49294943", "0.49252775", "0.4923754", "0.4920681", "0.49176893", "0.491338", "0.49115482", "0.4908282", "0.48997715", "0.48954615", "0.48940715", "0.48881876", "0.4887174", "0.4887041", "0.48849696", "0.48839918", "0.48791164", "0.48756254", "0.485908", "0.48532304", "0.48441473", "0.4838503", "0.4838503", "0.48377222", "0.48363727", "0.4829068", "0.48271435", "0.48255816" ]
0.5138391
31
Join together the meter data, weather data and building metadata into one df data = dict of df's (keys are 'building_metadata', 'weather_train', 'weather_test', 'train', 'test') dataset_name = 'train' or 'test' returns a merged df which includes building_metadata, weather_train (or weather_test) and train (or test)
def join_input_data_and_multi_index(data, dataset_name): meter_df = data[dataset_name] building_df = data['building_metadata'] weather_df = data['weather_' + dataset_name] # join meter and weather data building_n_meter = meter_df.merge(building_df, on='building_id', how='left') joined_data = building_n_meter.merge(weather_df, on=['site_id', 'timestamp'], how='left') # Add time related columns joined_data['hour'] = joined_data['timestamp'].dt.hour joined_data['weekday'] = joined_data['timestamp'].dt.dayofweek joined_data['week_number'] = joined_data['timestamp'].dt.week joined_data['month'] = joined_data['timestamp'].dt.month joined_data['is_weekend'] = joined_data['weekday'].apply(lambda x: 1 if x in [0, 6] else 0) # multi index on building id and timestamp joined_data = joined_data.set_index(['building_id', 'timestamp']).sort_index() return joined_data
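A minimal usage sketch for the join function above, showing how the dict of frames is assembled and passed in. The CSV file names and the parse_dates arguments are assumptions (only the dict keys are specified by the query); note also that Series.dt.week, used inside the function for week_number, is deprecated in recent pandas releases and can be replaced with dt.isocalendar().week.

import pandas as pd

# Hypothetical input files; only the dict keys ('building_metadata',
# 'weather_train'/'weather_test', 'train'/'test') are required by the function.
data = {
    "building_metadata": pd.read_csv("building_metadata.csv"),
    "weather_train": pd.read_csv("weather_train.csv", parse_dates=["timestamp"]),
    "train": pd.read_csv("train.csv", parse_dates=["timestamp"]),
}

# Returns a frame multi-indexed on (building_id, timestamp) with weather,
# building metadata and the derived time columns joined onto the meter readings.
train_df = join_input_data_and_multi_index(data, "train")
print(train_df.loc[0].head())  # all timestamps for building_id == 0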
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def load_data(data_links_list=(\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/raw_data.csv',\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/sample_meta_info.tsv')):\n\n # Reading data sets from the links provided.\n df1 = pd.read_csv(data_links_list[0],\n error_bad_lines=False)\n df2 = pd.read_csv(data_links_list[1],\n sep='\\t')\n df2 = df2.set_index(df2['project'])\n # fill the Nas id df1 as \". Makes the groupbys behave better.\n df1.fillna('', inplace=True)\n # repleace 'genus' = 'other' with an empty string to be consistent.\n df1.replace(to_replace='other', value='', inplace=True)\n # Removing duplicate columns.\n del df2['project']\n del df2['ID']\n df1 = df1.set_index(df1['project'])\n # Removing duplicate column.\n del df1['project']\n # Joining the two datasets.\n df = df1.join(df2)\n # Uniformity in non-capitalization of column names.\n df.rename(columns={'Kingdom': 'kingdom', 'Phylum': 'phylum',\n 'Class': 'class', 'Order': 'order',\n 'Family': 'family', 'Genus': 'genus',\n 'Length': 'length'}, inplace=True)\n df.index.names = ['sampleID']\n # Rearranging columns so that abundance is the last column.\n df = df[['kingdom',\t'phylum', 'class', 'order',\n 'family', 'genus', 'length', 'oxygen',\n 'replicate', 'week', 'abundance']]\n assert isinstance(df, pd.DataFrame)\n return df", "def combine(new_data, raw_data):\n return pd.merge(new_data, raw_data, on=[\"location\", \"date\"], how=\"outer\")", "def get_datasets(business_data_file, enter_data_file, politics_data_file, sport_data_file, tech_data_file):\n # Load data from files\n business_examples = list(open(business_data_file, \"r\").readlines())\n business_examples = [s.strip() for s in business_examples]\n enter_examples = list(open(enter_data_file, \"r\").readlines())\n enter_examples = [s.strip() for s in enter_examples]\n politics_examples = list(open(politics_data_file, \"r\").readlines())\n politics_examples = [s.strip() for s in politics_examples]\n sport_examples = list(open(sport_data_file, \"r\").readlines())\n sport_examples = [s.strip() for s in sport_examples]\n tech_examples = list(open(tech_data_file, \"r\").readlines())\n tech_examples = [s.strip() for s in tech_examples]\n\n datasets = dict()\n datasets['data'] = business_examples + enter_examples + politics_examples + sport_examples + tech_examples\n target = [0 for x in business_examples] + [1 for x in enter_examples] + [2 for x in politics_examples] + [3 for x in sport_examples] + [4 for x in tech_examples]\n datasets['target'] = target\n datasets['target_names'] = ['business_examples', 'enter_examples', 'politics_examples', 'sport_examples', 'tech_examples']\n return datasets", "def merge_data(agg_cases, lk_info, geolocation_data):\n merged_df = pd.merge(agg_cases, lk_info, left_on='IdLandkreis', right_on = 'Key')\n 
merged_df[\"RelativFall\"] = merged_df[\"AnzahlFall\"] / merged_df[\"Bev Insgesamt\"]\n merged_df[\"RelativTodesfall\"] = merged_df[\"AnzahlTodesfall\"] / merged_df[\"Bev Insgesamt\"]\n merged_df = pd.merge(merged_df, geolocation_data, left_on=\"Key\", right_on=\"cca_2\")\n return merged_df", "def merge_data():\n\n\tconfig = Config()\n\tfilename_train, filename_test = \"../data/train.csv\", \"../data/test.csv\" \n\n # create datasets\n\ttrain, test = config.load_data(filename_train, filename_test, print_EDA=False)\n\n # 1. datetime features\n\t# diff between weekday and day?\n\t#weekday - Return the day of the week as an integer, where Monday is 0 and Sunday is 6.\n\t#day - Between 1 and the number of days in the given month of the given year.\n\ttrain['pickup_hour'] = train.pickup_datetime.dt.hour.astype('uint8')\n\ttrain['pickup_day'] = train.pickup_datetime.dt.day.astype('uint8')\n\ttrain['pickup_weekday'] = train.pickup_datetime.dt.weekday.astype('uint8')\n\ttrain['pickup_minute'] = train.pickup_datetime.dt.minute.astype('uint8')\n\ttrain['pickup_month'] = train.pickup_datetime.dt.month.astype('uint8')\n\ttrain['pickup_hour_weekofyear'] = train['pickup_datetime'].dt.weekofyear\n\ttrain['pickup_weekday_hour'] = train['pickup_weekday']*24 + train['pickup_hour']\n\n\ttest['pickup_hour'] = test.pickup_datetime.dt.hour.astype('uint8')\n\ttest['pickup_day'] = test.pickup_datetime.dt.day.astype('uint8')\n\ttest['pickup_weekday'] = test.pickup_datetime.dt.weekday.astype('uint8')\n\ttest['pickup_minute'] = test.pickup_datetime.dt.minute.astype('uint8')\n\ttest['pickup_month'] = test.pickup_datetime.dt.month.astype('uint8')\n\ttest['pickup_hour_weekofyear'] = test['pickup_datetime'].dt.weekofyear\n\ttest['pickup_weekday_hour'] = test['pickup_weekday']*24 + test['pickup_hour']\n\n\t# 2. Location features\n\tdef haversine(lon1, lat1, lon2, lat2):\n\t lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\t dlon = lon2 - lon1\n\t dlat = lat2 - lat1\n\t a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2\n\t c = 2 * np.arcsin(np.sqrt(a))\n\t km = 6367 * c # AVG_EARTH_RADIUS=6367\n\t miles = km * 0.621371\n\t return miles\n\n\t# def dummy_manhattan_distance(lat1, lng1, lat2, lng2):\n\t# a = haversine_array(lat1, lng1, lat1, lng2)\n\t# b = haversine_array(lat1, lng1, lat2, lng1)\n\t# return a + b\n\n\t# def bearing_array(lat1, lng1, lat2, lng2):\n\t# AVG_EARTH_RADIUS = 6371 # in km\n\t# lng_delta_rad = np.radians(lng2 - lng1)\n\t# lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))\n\t# y = np.sin(lng_delta_rad) * np.cos(lat2)\n\t# x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lng_delta_rad)\n\t# return np.degrees(np.arctan2(y, x))\n\n\ttrain['distance'] = haversine(train.pickup_longitude, train.pickup_latitude,\n\t train.dropoff_longitude, train.dropoff_latitude)\n\ttest['distance'] = haversine(test.pickup_longitude, test.pickup_latitude,\n\t test.dropoff_longitude, test.dropoff_latitude)\n\n\n\t# 3. 
Use outsource data\n\tweatherdata_filename = \"../data/outsource_data/weather_data_nyc_centralpark_2016.csv\"\n\tfastestroute_data_train = \"../data/outsource_data/fastest_train.csv\"\n\tfastestroute_data_test = \"../data/outsource_data/fastest_routes_test.csv\"\n\n\n\twd = pd.read_csv(weatherdata_filename, header=0)\n\twd['date'] = pd.to_datetime(wd.date, format=\"%d-%m-%Y\")\n\twd['pickup_day'] = wd['date'].dt.day\n\twd['snow fall'] = wd['snow fall'].replace('T', 0.05).astype(np.float32) \n\twd['precipitation'] = wd['precipitation'].replace('T', 0.05).astype(np.float32) \n\twd['snow depth'] = wd['snow depth'].replace('T', 0.05).astype(np.float32) \n\n\t# Merge training data with weather data on pickup_day\n\tprint(\"Merging training data with weather data ....\")\n\twd_train = pd.merge(train, wd, on='pickup_day')\n\twd_train = wd_train.drop(['date','maximum temperature','minimum temperature'],axis=1)\n\tgc.collect()\n\n\t# Merge wd_train with fastestroute_data\n\tfastest = pd.read_csv(fastestroute_data_train, header=0)\n\tprint(\"Merging Location data with weather and training data ....\")\n\twd_train_fastest = pd.merge(wd_train, fastest, on='id', how='outer')\n\n\tgc.collect()\n\n\n\tprint(\"===================== CHECK TRAINING DATA =====================\")\n\n\tprint(wd_train_fastest.head(2))\n\tprint(\"Semi-final training data shape is: {}\".format(wd_train_fastest.shape))\n\tprint(\"Training data columns: {}\".format(wd_train_fastest.columns))\n\n\n\t# Use the same outsource data with test set\n\t# merge outsource data with test data as well\n\tft_test_cols = [ 'id', 'starting_street','end_street','total_distance',\t'total_travel_time',\n\t\t\t\t\t'number_of_steps','street_for_each_step','distance_per_step','travel_time_per_step',\n\t 'step_maneuvers','step_direction',\t'step_location_list']\n\tfastest_test = pd.read_csv(fastestroute_data_test, names=ft_test_cols, header=0)\n\n\tprint(\"Merging test data with Location data ....\")\n\ttest = pd.merge(test, fastest_test, on='id', how='outer')\n\ttest = test.drop(['step_location_list','step_direction','step_maneuvers','travel_time_per_step','distance_per_step','street_for_each_step','number_of_steps','starting_street',\n\t 'end_street'], axis=1)\n\tprint(\"Merging test data with weather data ....\")\n\ttest = pd.merge(test, wd, on='pickup_day')\n\n\tprint(\"===================== CHECK TEST DATA =====================\")\n\n\tprint(test.head(2))\n\tprint(\"Semi-final test data shape is: {}\".format(test.shape))\n\tprint(\"Test data columns: {}\".format(test.columns))\n\n\n\n\t# 4. Do more data munging\n\tmask = ((wd_train_fastest.trip_duration > 60) & (wd_train_fastest.distance < 0.05))\n\twd_train_fastest = wd_train_fastest[~mask]\n\tmask = (wd_train_fastest.trip_duration < 60) \n\twd_train_fastest = wd_train_fastest[~mask]\n\tmask = wd_train_fastest.trip_duration > 79200\n\twd_train_fastest = wd_train_fastest[~mask]\n\tmask = wd_train_fastest.distance/(wd_train_fastest.trip_duration/3600) > 60\n\twd_train_fastest = wd_train_fastest[~mask]\n\twd_train_fastest.trip_duration = wd_train_fastest.trip_duration.astype(np.uint16)\n\twd_train_fastest = wd_train_fastest[wd_train_fastest.passenger_count > 0]\n\n\t# 5. 
Do some data maskig based on location to create jfk and lgo features\n\tjfk_lon = -73.778889\n\tjfk_lat = 40.639722\n\tlga_lon = -73.872611\n\tlga_lat = 40.77725\n\n\twd_train_fastest['jfk_pickup_dist'] = wd_train_fastest.apply(lambda row: haversine(jfk_lon, jfk_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)\n\twd_train_fastest['lga_pickup_dist'] = wd_train_fastest.apply(lambda row: haversine(lga_lon, lga_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)\n\twd_train_fastest['jfk_dropoff_dist'] = wd_train_fastest.apply(lambda row: haversine(jfk_lon, jfk_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)\n\twd_train_fastest['lga_dropoff_dist'] = wd_train_fastest.apply(lambda row: haversine(lga_lon, lga_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)\n\n\twd_train_fastest['jfk'] = ((wd_train_fastest['jfk_pickup_dist'] < 2) | (wd_train_fastest['jfk_dropoff_dist'] < 2))\n\twd_train_fastest['lga'] = ((wd_train_fastest['lga_pickup_dist'] < 2) | (wd_train_fastest['lga_dropoff_dist'] < 2))\n\twd_train_fastest = wd_train_fastest.drop(['jfk_pickup_dist','lga_pickup_dist','jfk_dropoff_dist','lga_dropoff_dist'],axis=1)\n\twd_train_fastest['workday'] = ((wd_train_fastest['pickup_hour'] > 8) & (wd_train_fastest['pickup_hour'] < 18))\n\n\n\tprint(\"===================== CHECK TRAINING DATA AGAIN =====================\")\n\n\tprint(wd_train_fastest.head(2))\n\tprint(\"Final training data shape is: {}\".format(wd_train_fastest.shape))\n\tprint(\"Training data columns: {}\".format(wd_train_fastest.columns))\n\n\n\treturn wd_train_fastest, test", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n 
logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')", "def merge_weather_trails(df_weather, df_hike):\n df_trail_year = pd.merge(\n df_hike, df_weather, how='left', left_on=[\n 'closet_station', 'last_year'], right_on=[\n 'name', 'DATE'])\n df_all_clean = df_trail_year.drop(['DATE', 'name'], axis=1)\n return df_all_clean", "def clean_data(raw_data, names=const.NAMES, meter_map=const.METER_MAP):\n\n cleaned_data = {}\n local_names = names.copy()\n if 'building_metadata' in local_names:\n local_names.remove('building_metadata')\n\n for name in local_names:\n print(f'Cleaning {name} dataset')\n df = raw_data[name]\n df.timestamp = pd.to_datetime(df.timestamp)\n if name.startswith('weather'):\n df = add_missing_weather_data(df)\n elif name in ['train', 'test']:\n df['meter_type'] = df['meter'].map(meter_map)\n cleaned_data[name] = df\n\n cleaned_data['building_metadata'] = raw_data['building_metadata']\n\n return cleaned_data", "def build_dataset_join_dfs(pset_dict, pset_name, primary_dfs={}):\n cell_df = primary_dfs['cell'] if 'cell' in primary_dfs else None\n tissue_df = primary_dfs['tissue'] if 'tissue' in primary_dfs else None\n compound_df = primary_dfs['drug'] if 'drug' in primary_dfs else None\n\n join_dfs = {}\n join_dfs['dataset_cell'] = build_dataset_cell_df(\n pset_dict, pset_name, cell_df)\n join_dfs['dataset_tissue'] = build_dataset_tissue_df(\n pset_dict, pset_name, tissue_df)\n join_dfs['dataset_compound'] = build_dataset_compound_df(\n pset_dict, pset_name, compound_df)\n return join_dfs", "def join():\n dataset_df = pd.read_excel(\"dataset.xlsx\")\n statistics_df = pd.read_excel(\"statistics.xlsx\")\n\n merge_df = pd.merge(dataset_df, statistics_df, on=['patient_identifier'])\n\n writer = pd.ExcelWriter('final_dataset.xlsx', engine='xlsxwriter')\n merge_df.to_excel(writer, sheet_name='Sheet1')\n writer.save()", "def train_test_data_df(train_data_file, test_data_file):\n dtype_dict = {\n \"age\": np.int32,\n \"education-num\": np.int32,\n \"capital-gain\": np.int32,\n \"capital-loss\": np.int32,\n \"hours-per-week\": np.int32\n }\n cols = [i for i in range(15) if i != 2]\n train_data = pd.read_csv(train_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n train_data = train_data.dropna(axis=0, how=\"any\")\n test_data = pd.read_csv(test_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n test_data = test_data.dropna(axis=0, how=\"any\")\n return train_data, test_data", "def data_merge(detector_fields):\n print(\"Merging final data...\")\n\n # load files that contain phase and I/O processed data and store as dfs\n phase_data = pd.read_csv(results_folder + 'phases/processed/clean_merged_phases.csv', header=0,\n skipinitialspace=True, usecols=output_fields)\n detection_data = pd.read_csv(results_folder 
+ 'io/io_out.csv', header=0, skipinitialspace=True,\n usecols=detector_fields)\n phase_df = pd.DataFrame(phase_data)\n detection_df = pd.DataFrame(detection_data)\n\n # merge the two files based on their Date and Time fields\n output = pd.merge(phase_df, detection_df, on=['Date', 'Time'])\n\n # store the output with any duplicates dropped and create a final CSV file\n merged_df = output.drop_duplicates()\n merged_df.to_csv(results_folder + 'dataset.csv', sep=',', index=False)\n\n print(\"Data merged!\")\n print(\"Main dataset available: \" + results_folder + 'dataset.csv')\n\n # return location of dataset\n return results_folder + 'dataset.csv'", "def existing_data(self):\n # Set the directory and file name\n data_summary_dir = op.join('../logs', self.name, 'data_summary')\n file_name = 'Train_Test_Summary_generative.csv'\n\n # Read the csv and obtain the train data list\n df = pd.read_csv(op.join(data_summary_dir, file_name))\n train_data = df['Train Data'].dropna().values.tolist()\n test_data = df['Test Data'].dropna().values.tolist()\n\n train_data_list, test_data_list = [], []\n for single_train in train_data:\n data_name = single_train.split('_')[0]\n if data_name == 'LTRC':\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4]\n else:\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4] + '_' + single_train.split('_')[5]\n full_data_name = single_train.split('_')[0] + '_' + single_train.split('_')[1] + '_' + single_train.split('_')[2] + '_' + series\n train_data_list.append(full_data_name)\n\n for single_test in test_data:\n data_name = single_test.split('_')[0]\n if data_name == 'LTRC':\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4]\n else:\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4] + '_' + single_test.split('_')[5]\n full_data_name = single_test.split('_')[0] + '_' + single_test.split('_')[1] + '_' + single_test.split('_')[2] + '_' + series\n test_data_list.append(full_data_name)\n\n # Obtain the label map and CT list and file names\n label_map_list = glob(op.join(self.save_root_dir, 'source_data_2', '*'))\n ct_list = glob(op.join(self.save_root_dir, 'target_data_2', '*'))\n\n label_map_files = [single_file.split('/')[-1] for single_file in label_map_list]\n ct_files = [single_file.split('/')[-1] for single_file in ct_list]\n label_map_files.sort(), ct_files.sort()\n\n # Initialize empty list\n existing_train_lm, existing_train_ct = [], []\n existing_test_lm, existing_test_ct = [], []\n\n for single_lm, single_ct in zip(label_map_files, ct_files):\n\n ct_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\n lm_data_name = single_lm.split('_')[0] + '_' + single_lm.split('_')[1] + '_' + single_lm.split('_')[2]\n\n assert ct_data_name == lm_data_name, 'Data is not the same.'\n\n data_name = single_ct.split('_')[0]\n if data_name == 'LTRC':\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4]\n else:\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4] + '_' + single_ct.split('_')[5]\n full_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\\\n + '_' + series\n\n if full_data_name in train_data_list:\n existing_train_lm.append(single_lm)\n existing_train_ct.append(single_ct)\n if full_data_name in test_data_list:\n existing_test_lm.append(single_lm)\n existing_test_ct.append(single_ct)\n existing_train_data = [existing_train_lm, existing_train_ct]\n existing_test_data = 
[existing_test_lm, existing_test_ct]\n return existing_train_data, existing_test_data", "def setup_merged_df(obs_df):\n obs_df = obs_df.assign(height=obs_df[\"measurement\"], weight=obs_df[\"measurement\"])\n obs_df.loc[obs_df.param == \"WEIGHTKG\", \"height\"] = np.NaN\n obs_df.loc[obs_df.param == \"HEIGHTCM\", \"weight\"] = np.NaN\n heights = obs_df[obs_df.param == \"HEIGHTCM\"]\n weights = obs_df[obs_df.param == \"WEIGHTKG\"]\n merged = heights.merge(\n weights, on=[\"subjid\", \"agedays\", \"ageyears\", \"sex\"], how=\"outer\"\n )\n only_needed_columns = merged.drop(\n columns=[\n \"param_x\",\n \"measurement_x\",\n \"clean_value_x\",\n \"weight_x\",\n \"id_y\",\n \"param_y\",\n \"measurement_y\",\n \"clean_value_y\",\n \"height_y\",\n ]\n )\n clean_column_names = only_needed_columns.rename(\n columns={\n \"clean_cat_x\": \"height_cat\",\n \"include_x\": \"include_height\",\n \"height_x\": \"height\",\n \"clean_cat_y\": \"weight_cat\",\n \"include_y\": \"include_weight\",\n \"weight_y\": \"weight\",\n \"reason_y\": \"reason\",\n \"id_x\": \"id\",\n }\n )\n clean_column_names[\"bmi\"] = clean_column_names[\"weight\"] / (\n (clean_column_names[\"height\"] / 100) ** 2\n )\n clean_column_names[\"rounded_age\"] = np.around(clean_column_names.ageyears)\n clean_column_names[\"include_both\"] = (\n clean_column_names[\"include_height\"] & clean_column_names[\"include_weight\"]\n )\n return clean_column_names", "def predict_energy_consumption(buildings):\n forecasts = [forecast_for_building(building) for i, building in buildings.iterrows()]\n df = pd.concat(forecasts)\n df.drop(columns=\"id\", inplace=True)\n df = buildings.merge(df, left_on=\"id\", right_on=\"building_id\")\n df[\"meter\"] = 0\n df[\"floor_count\"] = df[\"floorcount\"]\n df[\"air_temperature\"] = df[\"temp\"]\n df[\"relative_humidity\"] = df[\"humidity\"]\n df[\"dew_temperature\"] = df[\"air_temperature\"] - ((100 - df[\"relative_humidity\"]) / 5)\n df[\"precip_depth_1_hr\"] = np.nan\n df[\"timestamp\"] = pd.to_datetime(df[\"date\"])\n df[\"wind_direction\"] = df[\"deg\"]\n df[\"wind_speed\"] = df[\"speed\"]\n\n df.drop(columns=[\"id\", \"name\", \"floorcount\", \"latitude\", \"longitude\", \"user_id\", \"temp\", \"feels_like\", \"temp_min\",\n \"temp_max\", \"pressure\", \"sea_level\", \"grnd_level\", \"humidity\", \"temp_kf\", \"main\", \"description\",\n \"icon\", \"speed\", \"deg\", \"date\"], inplace=True)\n\n df_temp = df.copy(deep=True)\n for i in range(1, 4):\n df_temp[\"meter\"] += 1\n df = pd.concat([df, df_temp])\n del df_temp\n\n cfg = {\n 'circular_timestamp_encoding': False,\n 'log_transform_square_feet': True,\n 'log_transform_area_per_floor': True,\n 'label_square_feet_outlier': True,\n 'label_area_per_floor_outlier': True,\n 'encode_wind_direction': False,\n 'include_feels_like': True,\n 'fill_na_with_zero': False,\n 'add_lag_features': True,\n 'lag_columns': ['air_temperature', 'dew_temperature', 'cloud_coverage'],\n 'lag_windows': [6, 24],\n }\n [df] = build_features(df, cfg=cfg)\n\n df.reset_index(inplace=True, drop=True)\n building_ids = df[\"building_id\"]\n timestamps = df[\"timestamp\"]\n df.drop(columns=[\"timestamp\", \"month\", \"wind_direction\", \"wind_speed\", \"building_id\"], inplace=True)\n\n model_endpoint = \"http://model:5001/predict\"\n data = df.to_json()\n response = requests.get(model_endpoint, json=data).json()\n\n predictions = pd.DataFrame({\"reading\": response[\"prediction\"],\n \"building_id\": building_ids,\n \"meter\": df[\"meter\"],\n \"timestamp\": timestamps,\n 
\"air_temperature\": df[\"air_temperature\"]})\n return predictions", "def load_data(conso_train_file='../input/conso_train.csv',\n meteo_train_file='../input/meteo_train.csv',\n meteo_test_file='../input/meteo_prev.csv'):\n # Loading the weather data\n train_meteo = pd.read_csv(meteo_train_file, sep=';')\n test_meteo = pd.read_csv(meteo_test_file, sep=';')\n\n # Marking the type and merge the train/test dataframes\n train_meteo['type'] = 'train'\n test_meteo['type'] = 'test'\n data_meteo = pd.concat([train_meteo, test_meteo])\n # Correct the weather data\n data_meteo = correct_weather_data(data_meteo)\n\n # Load the train consumption\n train_conso = pd.read_csv(conso_train_file, sep=';')\n # Correct the consumption data\n train_conso = correct_conso_data(train_conso)\n\n # Mark the type and merge the consumption/weather dataframes\n train_conso['type'] = 'train'\n data = pd.merge(train_conso, data_meteo,\n on=['date', 'type'], how='outer', sort=True)\n\n # Complete the test dates\n data = add_test_dates(data)\n # Correct the test data from the merging\n data = correct_test_data(data)\n\n return data", "def group_data():\n\n # Merge on Departure.\n\n # Merge on Arrival.\n\n data = pd.read_csv(path + \"/data/public/public_train.csv\")[[\"DateOfDeparture\", \"Arrival\"]]\n data['DateOfDeparture'] = pd.to_datetime(data['DateOfDeparture'])\n\n arrival = join_cleaned_data().\\\n rename(columns={'Date': 'DateOfDeparture', 'Airport': 'Arrival'}).\\\n set_index(\"DateOfDeparture\")\n\n merged_arrv = pd.merge(data, arrival, on=[\"DateOfDeparture\", \"Arrival\"], how=\"left\")\n\n # Rename and drop columns.\n\n merged_arrv.columns = [c + \"_Arrival\" if c not in [\"DateOfDeparture\",\n \"DateOfArrival\",\n \"Arrival\",\n \"WeeksToDeparture\"]\n else c\n for c in merged_arrv.columns]\n print merged_arrv\n merged_arrv = merged_arrv.drop([\"Arrival\"], axis=1)\n\n # Concatenate the two fields.\n # merged_all = pd.concat([merged_arrv, merged_dept], axis=1)\n\n merged_all = merged_arrv.\\\n convert_objects(convert_numeric=True)\n merged_all.to_csv(path + \"/Submission/temperatures.csv\")", "def combine_data(self):\n for country in config.COUNTRIES:\n frames = []\n for year in config.years:\n incidence_path = (config.raw_data_path / country / 'complete'\n / (str(year) + '_' + str(year + 1) + '.csv'))\n\n if incidence_path.exists() and incidence_path.is_file():\n df_incidence = pd.read_csv(incidence_path)\n\n wiki_path1 = config.raw_data_path / ('wikipedia_' +\n country) / \\\n 'complete' / (\n str(year) + '.csv')\n wiki_path2 = config.raw_data_path / ('wikipedia_' +\n country) / \\\n 'complete' / (\n str(year + 1) + '.csv')\n\n if wiki_path1.exists() and wiki_path1.is_file():\n df_wiki1 = pd.read_csv(wiki_path1)\n df_wiki1 = df_wiki1.rename(columns={'Week': 'week'})\n df_incidence = pd.merge(\n df_wiki1, df_incidence, on='week', how='right')\n\n if wiki_path2.exists() and wiki_path2.is_file():\n df_wiki2 = pd.read_csv(wiki_path2)\n df_wiki2 = df_wiki2.rename(columns={'Week': 'week'})\n df_incidence = pd.merge(\n df_wiki2, df_incidence, on='week', how='right')\n\n for col_name in df_incidence.columns:\n if col_name[-1] == 'x':\n if col_name[:-2] + '_y' in df_incidence.columns:\n df_incidence[col_name[:-2]] = df_incidence[\n col_name].fillna(\n df_incidence[col_name[:-2] + '_y'])\n df_incidence = df_incidence.drop(\n columns=[col_name,\n col_name[:-2] + '_y'])\n\n frames.append(df_incidence)\n\n df_country = pd.concat(frames)\n df_country['date'] = pd.to_datetime(\n df_country.week.add('-0'), 
format='%Y-%W-%w')\n df_country = df_country.sort_values(by=\"date\")\n\n if 'cases' in df_country.columns:\n df_country.drop(columns=['cases'])\n\n file_path = config.combined_data_path / (country + '.csv')\n\n df_country.to_csv(file_path, index=False)", "def load_data():\n data_path = os.path.join('qual-o-mat-data', 'data', '2019', 'europa')\n data_keys = [\"answer\", \"comment\", \"opinion\", \"party\", \"statement\"]\n raw_data = dict()\n all_data = dict()\n\n # Create a dictionary of type <string, DataFrame> that contains the data from all JSON files\n for dk in data_keys:\n json_file = os.path.join(data_path, dk + \".json\")\n with open(json_file, \"r\") as fh:\n raw_data[dk] = json.load(fh)\n all_data[dk] = pd.DataFrame(raw_data[dk])\n\n\n # Based on the opinion data, merge all other data frames on their ID fields to get usable names instead of just ID numbers\n merged_df = all_data[\"opinion\"].copy()\n for to_merge in [\"party\", \"statement\", \"comment\", \"answer\"]:\n merged_df = merged_df.merge(all_data[to_merge], how='inner', left_on=[to_merge], right_on=['id'])\n\n #print(mdf.head())\n return merged_df, all_data, raw_data", "def ana_merge_datas(datas):\n return {\n 'searches':ana_merge_searches(datas),\n 'senzory_map':ana_merge_senzory_map(datas)\n }", "def load_data():\n train = pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}", "def create_metadata(data_dir):\n pool = multiprocessing.Pool()\n\n train_feature_paths = [os.path.join(data_dir, \"train_features_{}.jsonl\".format(i)) for i in range(6)]\n train_records = list(pool.imap(read_metadata_record, raw_feature_iterator(train_feature_paths)))\n train_records = [dict(record, **{\"subset\": \"train\"}) for record in train_records]\n\n test_feature_paths = [os.path.join(data_dir, \"test_features.jsonl\")]\n test_records = list(pool.imap(read_metadata_record, raw_feature_iterator(test_feature_paths)))\n test_records = [dict(record, **{\"subset\": \"test\"}) for record in test_records]\n\n all_metadata_keys = [\"sha256\", \"appeared\", \"subset\", \"label\", \"avclass\"]\n ordered_metadata_keys = [k for k in all_metadata_keys if k in train_records[0].keys()]\n metadf = pd.DataFrame(train_records + test_records)[ordered_metadata_keys]\n metadf.to_csv(os.path.join(data_dir, \"metadata.csv\"))\n return metadf", "def data_and_metadata(self):\n data = self.data\n if self._metadata is not None and not self._metadata.empty:\n data = [self._metadata, data]\n data = pd.concat(data, axis=1)\n return data", "def merge_weather(weather):\n\n weather1 = weather[weather[\"Station\"] == 1]\n weather2 = weather[weather[\"Station\"] == 2]\n\n rows, rows1, rows2 = (weather.shape[0],\n weather1.shape[0],\n weather2.shape[0])\n\n weather = pd.merge(weather1, weather2, on=\"Date\")\n weather.drop([\"Station_x\", \"Station_y\"], axis=1, inplace=True)\n\n newrows = weather.shape[0]\n # sanity check the rows\n assert(rows1 + rows2 == rows)\n assert(rows1 == newrows)\n\n return weather", "def read_training(index_columns=None, both=False, weather=False):\n if weather:\n raw_X_train = 
pd.read_csv('data\\\\train_X.csv', parse_dates=['date'])\n raw_weather = pd.read_csv('data\\\\weather_data.csv', parse_dates=['date'])\n\n raw_X_train = ffill_nans(raw_X_train)\n raw_X_train = raw_X_train.merge(raw_weather, how='left', on=['date','hour'])\n raw_X_train = raw_X_train.set_index(index_columns)\n\n else:\n raw_X_train = pd.read_csv(\n 'data\\\\train_X.csv',\n parse_dates=['date'],\n index_col=index_columns)\n if both:\n raw_y_train = pd.read_csv(\n 'data\\\\train_y.csv',\n parse_dates=['date'],\n index_col=index_columns)\n\n return raw_X_train, raw_y_train\n \n return raw_X_train", "def ConcatDF(train_set, test_set):\n return pd.concat([train_set, test_set], sort=True).reset_index(drop=True)", "def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]", "def load_data():\n \n data = datasets.load_iris()\n df = pd.DataFrame(data.data, columns = data.feature_names)\n df2 = pd.DataFrame(data.target, columns = [\"target\"])\n\n return df.join(df2)", "def merge_record(self, dt, container = ''): \n record_dataset_legth ={} \n \n \n \"\"\" Combining the ncar_t and ncar_w files.\n If both are present, select the ncar_t data and rename it as 'ncar'. \n If only one is present, simply rename it as 'ncar'. \n \"\"\" \n if ('ncar_t' in list(container.keys()) ):\n container['ncar'] = {} \n container['ncar']['df'] = container['ncar_t']['df'] \n \n elif ( 'ncar_w' in list(container.keys()) and 'ncar_t' not in list(container.keys()) ) :\n container['ncar'] = {} \n container['ncar']['df'] = container['ncar_w']['df'] \n\n \n for k in container.keys():\n if k == 'ncar_t' or k == 'ncar_w': \n continue \n record_dataset_legth[k] = len(container[k]['df'] )\n \n \n \"\"\" For now, choosing the dataset with more records of all or igra2>ncar>rest data if available and with same number of records \"\"\"\n best_ds, all_ds , best_datasets, all_ds_reports = 'dummy' , [] , [], [] # total number of records, name of the chosen dataset , list of other possible dataset with available data \n \n most_records = max( [ v for v in record_dataset_legth.values() ] ) # maximum number of records per date_time \n \n for k, v in record_dataset_legth.items(): \n if v == 0:\n continue\n if v == most_records:\n best_datasets.append(k) \n if v > 0:\n all_ds.append(k) # all other datasets with smaller number of records than the maximum found\n try: \n all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + container[k]['df']['report_id'].values[0] ) # converting the original report id using the same convention as for observation_id\n except:\n all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + int( (container[k]['df']['report_id'].values[0]).tostring() ) ) # converting the original report id using the same convention as for observation_id\n \n \n #all_ds_reports.append(np.nan)\n #print ( type(container[k]['df']['report_id'].values) )\n #all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + float(container[k]['df']['report_id'].values[0].decode('latin1') ))\n \n if len(best_datasets) ==0:\n print('wrong??? 
please check')\n return 0,0,0,0 \n \n if 'igra2' in best_datasets:\n best_ds = 'igra2'\n elif 'ncar' in best_datasets:\n best_ds = 'ncar'\n elif 'era5_1' in best_datasets:\n best_ds = 'era5_1' \n else:\n best_ds = best_datasets[0]\n \n \"\"\" Extract container \"\"\" \n selected_df = container[best_ds]['df'].copy(deep = True) # might take extra time, dont know how to get rid of this \n\n try:\n merged_report = self.observation_ids_merged[best_ds] * 1000000000 + int( selected_df['report_id'].values[0].tostring() ) \n except:\n merged_report = np.nan \n\n \"\"\" Calculate new unique observation id \"\"\"\n try: \n obs_ids_merged = [ self.observation_ids_merged[best_ds] * 1000000000 + int( i.tostring() ) for i in selected_df['observation_id'] ]\n except:\n obs_ids_merged = [ np.nan for i in selected_df['observation_id'] ]\n \n \n selected_df['observation_id'] = obs_ids_merged\n \n \"\"\" Calculate new unique report id \"\"\" \n selected_df['report_id'] = merged_report\n\n \"\"\" Returning a string with the alternative available datasets data \"\"\"\n if len(all_ds_reports) > 1: \n duplicates = \",\".join( [ str(i) for i in all_ds_reports] )\n else:\n duplicates = str(all_ds_reports[0])\n \n \n \"\"\" Extracting the merged header_table.\n Again, must consider the special case where best_ds == ncar. \n Note that the header table *should* be identical for ncar_w or ncar_t \"\"\" \n if best_ds != 'ncar':\n header = self.get_header_table(dt, ds= best_ds, all_ds = duplicates , length= len(selected_df) )\n \n elif ( best_ds == 'ncar' and 'ncar_t' in list(container.keys()) ) :\n header = self.get_header_table(dt, ds = 'ncar_t', all_ds = duplicates, length= len(selected_df))\n \n elif ( best_ds == 'ncar' and 'ncar_t' not in list(container.keys()) ) :\n header = self.get_header_table(dt, ds = 'ncar_w', all_ds = duplicates, length= len(selected_df) ) \n \n logging.debug('I use %s record since it has more entries: %s but other available datasets are : %s' , best_ds , str(most_records) , all_ds ) \n \n #print ('duplicates are: ', duplicates)\n return selected_df, best_ds , duplicates, header", "def readData():\n pd.set_option('display.expand_frame_repr', False)\n # read data from training_text\n df_text = pd.read_csv('training_text', sep = '\\|\\|', index_col= 'ID',skip_blank_lines =True, nrows = 10, header = None, skiprows = 1, names = ['ID', 'Text'], engine = 'python', encoding = 'utf-8', dtype = str)\n print(\"TEXT COUNT - \" + str(df_text.count()))\n print(\"MISSING TEXT\")\n missing_text = df_text.isnull().sum()\n print(missing_text)\n \n # read data from training_variants\n df_variants = pd.read_csv('training_variants', skip_blank_lines =True, nrows = 10, index_col= 'ID', header = None, skiprows = 1, names = ['ID','Gene','Variation','Class'], engine = 'python', encoding = 'utf-8', dtype = str)\n print(\"VARIANTS COUNT - \" + str(df_variants.count()))\n print(\"MISSING VARIANTS\")\n missing_variants = df_variants.isnull().sum()\n print(missing_variants)\n # merge both datasets\n df = pd.concat([df_text, df_variants], axis = 1)\n return df", "def generate_training_testing_dataset(store_id, transactions, meteo_day, max_days=2500,\n single_barcode=0):\n\n # Get the minimum and maximum of date in the transactions\n min_date = transactions[(transactions['STO_EAN'] == store_id)].min()['TRX_DATETIME'].date()\n max_date = transactions[(transactions['STO_EAN'] == store_id)].max()['TRX_DATETIME'].date()\n\n # Get the number of days between the two date\n num_days = (max_date - min_date).days\n\n # Get the list 
of unique products barcode in the transactions\n products_barcode = transactions['BARCODE'].unique()\n\n # Only do one single barcode if activated\n if single_barcode is not None:\n products_barcode = [products_barcode[single_barcode]]\n\n\n # Array to contain all training data\n all_data_first_level = []\n\n # For each day and for each product\n for day in xrange(num_days):\n\n print(day)\n\n # If we have already considered more days than allowed, stop\n if day > max_days:\n break\n\n\n # Get the date corresponding to this day\n date = min_date + pd.DateOffset(day)\n # Get the weather of the date\n weather = get_weather_on_date(date, meteo_day, store_id).head(n=1)\n\n # If the weather is empty we skip this day\n if weather.empty:\n continue\n\n # For each product to include\n for product_barcode in products_barcode:\n\n # Get the volume and inventory data\n volume = get_volume_product_on_date(product_barcode, date, store_id, transactions)\n\n # If no volume could be found skip this date,product pair\n if volume is None:\n continue\n\n # Get the type of the current date\n day_type = generate_day_type(date)\n\n\n # Generating complex features based on the simpler one\n\n # This contains respectively yesterday, the day before yesterday and the same day as current one in\n # previous week\n yesterday = date - pd.DateOffset(1)\n two_days_ago = date - pd.DateOffset(2)\n one_week_ago = date - pd.DateOffset(7)\n\n # Get the day type of yesterday and 2 days ago\n day_type_yesterday = generate_day_type(yesterday)\n day_type_2days_ago = generate_day_type(two_days_ago)\n\n # Get the volume of yesterday, 2days ago and 1 week ago\n volume_yesterday = get_volume_product_on_date(product_barcode, yesterday, store_id, transactions)\n volume_2days_ago = get_volume_product_on_date(product_barcode, two_days_ago, store_id, transactions)\n volume_one_week_ago = get_volume_product_on_date(product_barcode, one_week_ago, store_id, transactions)\n\n\n # Get the total sales and the total weight of product done yesterday, 2 days ago and 1 week ago\n volume_price_yesterday = 0\n volume_weight_yesterday = 0\n if volume_yesterday is not None:\n volume_price_yesterday = volume_yesterday[\"price\"]\n volume_weight_yesterday = volume_yesterday[\"weight\"]\n\n volume_price_2days_ago = 0\n volume_weight_2days_ago = 0\n if volume_2days_ago is not None:\n volume_price_2days_ago = volume_2days_ago[\"price\"]\n volume_weight_2days_ago = volume_2days_ago[\"weight\"]\n\n volume_price_one_week_ago = 0\n volume_weight_one_week_ago = 0\n if volume_one_week_ago is not None:\n volume_price_one_week_ago = volume_one_week_ago[\"price\"]\n volume_weight_one_week_ago = volume_one_week_ago[\"weight\"]\n\n\n\n # Using historical weather data\n weather_yesterday = get_weather_on_date(yesterday, meteo_day, store_id).head(n=1)\n temperature_min_yesterday = 0\n temperature_max_yesterday = 0\n if not weather_yesterday.empty:\n temperature_min_yesterday = weather_yesterday['TEMPERATURE_VALUE_MIN'].values[0]\n temperature_max_yesterday = weather_yesterday['TEMPERATURE_VALUE_MIN'].values[0]\n\n\n #tmp = [weather['TEMPERATURE_VALUE_MIN'].values[0], weather['TEMPERATURE_VALUE_MAX'].values[0],\n # weather['PRECIPITATION_VALUE'].values[0], weather['SUNSHINE_DURATION'].values[0],\n # weather['SNOW_DEPTH'].values[0], day_type, volume[\"price\"], volume[\"weight\"]]\n\n\n # Saving Features\n tmp = [weather['TEMPERATURE_VALUE_MIN'].values[0], weather['TEMPERATURE_VALUE_MAX'].values[0],\n day_type, volume[\"price\"], 
volume_price_yesterday,volume_weight_yesterday,\n volume_price_2days_ago, volume_weight_2days_ago,\n volume_price_one_week_ago, volume_weight_one_week_ago, temperature_min_yesterday,\n temperature_max_yesterday,day_type_yesterday, day_type_2days_ago,\n volume[\"weight\"]]\n\n all_data_first_level.append(tmp)\n\n return all_data_first_level", "def prepare_train_data(self):\r\n ## Impute rlkpis\r\n print(\"Imputing rlKPI df\")\r\n self.rlkpi.add_target_labels(1)\r\n self.rlkpi.impute_rl_kpis()\r\n\r\n print(\"Add 'met-real-station_no' & met-forecast-station_no to rl_kpis_df\")\r\n self.add_met_real_forecast_station_col_to_rlkpis()\r\n print(\"Merge 'met-real-sampled df to rl kps \")\r\n self.merge_met_real_sampled_df_to_rlkpis()\r\n\r\n ## Imputations for met-forecast\r\n print(\"Impute met-forecast\")\r\n met_forecast_obj = self.metfcast\r\n met_forecast_obj.impute_met_forecast()\r\n\r\n #Merge met forecast data to earlier merged data\r\n print(\"Merge Train data with imputed forecast df\")\r\n self.train_data = pd.merge(self.train_data,\r\n met_forecast_obj.imputed_forecast_df,\r\n on=['datetime-station_no'], indicator=True, how='inner')\r\n print(\"Check any imputation needed\", self.train_data.isna().sum().sum())\r\n self.train_data.drop(['_merge'], axis=1, inplace=True)\r\n self.perform_data_under_sampling(self.train_data)", "def _merge_test_output(self, dict_env, env_name):\n for iir, test in enumerate(self.params[\"tests\"]):\n with self.reports[env_name][iir].open() as f:\n report = json.load(f)\n report = _check_dict(report, test[\"name\"])\n # for some plots it's easier to use \"flat\" test structure\n report_flat = _flatten_dict_test(report)\n if iir == 0:\n try:\n df = pd.DataFrame(report)\n except ValueError: # if results are not list\n df = pd.DataFrame(report, index=[0])\n df_flat = pd.DataFrame(report_flat, index=[0])\n else:\n try:\n df = df.merge(pd.DataFrame(report), how=\"outer\")\n except ValueError: # if results are not list\n df = df.merge(pd.DataFrame(report, index=[0]), how=\"outer\")\n df_flat = pd.concat(\n [df_flat, pd.DataFrame(report_flat, index=[0])], axis=1\n )\n\n df_env = pd.DataFrame(dict_env, index=[0])\n df_flat = pd.concat([df_env, df_flat], axis=1)\n df_env = pd.concat([df_env] * len(df)).reset_index(drop=True)\n df = pd.concat([df_env, df], axis=1)\n return df, df_flat", "def compute_aggregate_weather_data():\n\n # get a list of all the csv files names in the 'weather_data' directory\n files = get_all_csv_files_in_directory('weather_data')\n\n # Todo: if the number of csv files doesn't match the expected value, unzip remaining using the 'os' module\n\n if len(files) == 0:\n\n # Unzip all files in current directory and subdirectories\n print \"unzipping weather files...\"\n os.system(\"unzip 'weather_data/*.zip' -d weather_data\")\n\n\n # Try again to get files\n files = get_all_csv_files_in_directory('weather_data')\n\n # Throw exception if still missing csv files\n if len(files) == 0:\n raise ValueError(\"Missing weather data in csv format in the 'weather_data' directory\")\n\n # convert the list of csv file names to a list of corresponding DataFrames\n dallas_files = filter(lambda file_name : \"KDAL\" in file_name, files)\n houston_files = filter(lambda file_name : \"KHOU\" in file_name, files)\n san_antonio_files = filter(lambda file_name : \"KSAT\" in file_name, files)\n\n print \"Retrieved weather data files...\"\n print \"\\t# of Dallas weather files found: \", len(dallas_files)\n print \"\\t# of Houston weather files found: \", 
len(houston_files)\n print \"\\t# of San Antonio weather files found: \", len(san_antonio_files)\n\n dallas_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), dallas_files)\n houston_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), houston_files)\n san_antonio_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), san_antonio_files)\n\n dallas_df = pd.concat(dallas_dfs)\n houston_df = pd.concat(houston_dfs)\n san_antonio_df = pd.concat(san_antonio_dfs)\n\n print \"Aggregating all of the weather data...\"\n # fold the list of data frames into a single data frame\n aggregate_df = reduce(lambda df1, df2: pd.merge(df1, df2, on=\"Date\", how=\"outer\"), [dallas_df, houston_df, san_antonio_df]).sort_values(\"Date\")\n\n return aggregate_df", "def create_dataset_dict(dataset: str, all_data_dict: dict) -> dict:\n\n return {\n 'branch_comments_embedded_text_df': all_data_dict[dataset][f'branch_comments_embedded_text_df'],\n 'branch_comments_features_df': all_data_dict[dataset][f'branch_comments_features_df'],\n 'branch_comments_user_profiles_df': all_data_dict[dataset][f'branch_comments_user_profiles_df'],\n 'branch_submission_dict': all_data_dict[dataset][f'branch_submission_dict'],\n 'submission_data_dict': all_data_dict[dataset][f'submission_data_dict'],\n 'branch_deltas_data_dict': all_data_dict[dataset][f'branch_deltas_data_dict'],\n 'branches_lengths_list': all_data_dict[dataset][f'branches_lengths_list'],\n 'len_df': all_data_dict[dataset]['len_df']\n }", "def combine_all(self):\n combined = copy.deepcopy(self.train)\n\n def _combine_data(data):\n for img_path, pid, camid in data:\n\n if pid in self._junk_pids:\n continue\n #pdb.set_trace()\n pid = self.dataset_name + \"_\" + str(pid)\n camid = self.dataset_name + \"_\" + str(camid)\n combined.append((img_path, pid, camid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def merge_two_keyword_chunks(data_first, data_second):\n common_keyword = data_first.columns.intersection(data_second.columns)[0]\n scaling_factor = np.nanmedian(\n data_first[common_keyword] / data_second[common_keyword])\n data_second = data_second.apply(lambda x: x * scaling_factor)\n data = pd.merge(data_first, data_second.drop(\n common_keyword, axis=1), left_index=True, right_index=True)\n return data", "def combined_df(self) -> pd.DataFrame:\n return pd.concat([self.data, self.latest_data.reset_index()], ignore_index=True)", "def combine_train_infer(train_file, infer_dir):\n\n train_df = pd.read_feather(train_file)\n\n time_range = range(len([f for f in os.listdir(infer_dir) if 'feather' in f]))\n infer_df_list = [pd.read_feather(f'{infer_dir}/{t}.feather') for t in time_range]\n\n comb_df_list = []\n train_df.index = [-1] * len(train_df)\n\n comb_df_list.append(train_df)\n\n for t in time_range:\n df = infer_df_list[t]\n df.index = [t] * len(df)\n\n comb_df_list.append(df)\n\n return pd.concat(comb_df_list), train_df, infer_df_list", "def build_data(self):\n from desiutil.io import combine_dicts\n # Loop on exposures\n odict = {}\n for qanight in self.qa_nights:\n for qaexp in qanight.qa_exps:\n # Get the exposure dict\n idict = write_qa_exposure('foo', qaexp, ret_dict=True)\n odict = combine_dicts(odict, idict)\n # Finish\n self.data = odict", "def data(dataset=\"bio_eventrelated_100hz\"):\n # TODO: one could further improve this function with like\n # selectors 
'ecg=True, eda=True, restingstate=True' that would\n # find the most appropriate dataset\n\n dataset = dataset.lower()\n\n # TODO: change this path back to \"master\"\n path = \"https://raw.githubusercontent.com/neuropsychology/NeuroKit/dev/data/\"\n\n # Signals as vectors =======================\n if dataset in [\"eeg\", \"eeg_150hz\", \"eeg.txt\"]:\n return pd.read_csv(path + \"eeg.txt\").values[:, 0]\n\n if dataset in [\"rsp\", \"rsp_1000hz\", \"rsp_1000hz.txt\"]:\n return pd.read_csv(path + \"rsp_1000hz.txt\", header=None).values[:, 0]\n\n if dataset in [\"ecg\", \"ecg_1000hz\", \"ecg_1000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"ecg_3000hz\", \"ecg_3000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"eog\", \"veog\", \"eog_100hz\", \"eog_100hz.csv\"]:\n return pd.read_csv(path + \"eog_100hz.csv\")[\"vEOG\"].values\n\n # Dataframes ===============================\n if dataset == \"iris\":\n info = sklearn_datasets.load_iris()\n data = pd.DataFrame(\n info.data, columns=[\"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\", \"Petal.Width\"]\n )\n data[\"Species\"] = info.target_names[info.target]\n return data\n\n if dataset in [\"eogs\", \"eogs_200hz\", \"eog_200hz\", \"eog_200hz.csv\"]:\n return pd.read_csv(path + \"eog_200hz.csv\")\n\n # Add extension\n if dataset in [\"bio_resting_8min_200hz\"]:\n dataset += \".json\"\n\n # Specific case for json file\n if dataset.endswith(\".json\"):\n if \"https\" not in dataset:\n data = pd.read_json(path + dataset, orient=\"index\")\n else:\n data = pd.read_json(dataset, orient=\"index\")\n df = {}\n for participant, row in data.iterrows():\n for _, data_string in row.items():\n data_list = json.loads(data_string)\n data_pd = pd.DataFrame(data_list)\n df[participant] = data_pd\n\n return df\n\n # TODO: Add more EEG (fif and edf datasets)\n if dataset in [\"eeg_1min_200hz\"]:\n\n return pickle.load(\n urllib.request.urlopen(\n \"https://github.com/neuropsychology/NeuroKit/blob/dev/data/eeg_1min_200hz.pickle?raw=true\"\n )\n )\n\n # General case\n file, ext = os.path.splitext(dataset) # pylint: disable=unused-variable\n if ext == \"\":\n df = pd.read_csv(path + dataset + \".csv\")\n else:\n if \"https\" not in dataset:\n df = pd.read_csv(path + dataset)\n else:\n df = pd.read_csv(dataset)\n return df", "def get_outdoor_data(temp_dir,site):\n if site == 'berk':\n files_od = glob(join(temp_dir,'outdoor','20*.xlsx'))\n elif site == 'bus':\n files_od = glob(join(temp_dir,'outdoor','Busara*.csv'))\n else:\n raise NameError(site)\n\n dfs = []\n for f in files_od:\n if site == 'berk':\n this_df = pd.read_excel(f,sheet_name=0,usecols='B:D',index_col=0,parse_dates=True, header=1)\n elif site == 'bus':\n this_df = pd.read_csv(f,usecols=[0,1,2],index_col=0,parse_dates=True,header=2)\n \n # drop missing values that prevented conversion to float type\n if this_df.iloc[:,0].dtype != np.float64:\n this_df = this_df[this_df.iloc[:,0] != ' ']\n this_df = this_df.astype(np.float64)\n\n # correct for weird timezones in berkeley datalogger\n this_df = correct_tz(this_df,site)\n \n this_df.columns = ['T','RH']\n this_df.index.name = 'time'\n\n # convert to celsius\n this_df['T'] = (this_df['T'] - 32) * 5/9\n dfs.append(this_df)\n \n df_od = pd.concat(dfs)\n\n # drop duplicated measurements\n df_od = df_od[~df_od.index.duplicated(keep='last')].sort_index()\n \n # separate out into daily min,mean,max\n groups = df_od.groupby(df_od.index.date)\n dfs_od = {'all':df_od,\n 
'min': groups.min(),\n 'mean': groups.mean(),\n 'max': groups.max()}\n \n for i in ['min','mean','max']:\n # remove first and last day to ignore days where we did not get full recording\n dfs_od[i] = dfs_od[i].iloc[1:-1,:]\n \n # name index so that we can merge onto multiIndex'd dataframe\n dfs_od[i].index.name = 'date'\n \n return dfs_od", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def merge_dfs(raw_dfs_dict: Dict[str, pd.DataFrame]) -> \\\n (pd.DataFrame, Dict[str, pd.DataFrame]):\n\n # List out the dfs available, and make copies of all dfs\n raw_dfs_list = raw_dfs_dict.keys()\n dfs_dict = {}\n for df in raw_dfs_list:\n dfs_dict[df] = raw_dfs_dict[df].copy()\n\n # Run the custom cleaning functions on the dataframes that need them\n dfs_dict['googletrend.csv'] = \\\n clean_googletrend_csv(dfs_dict['googletrend.csv'])\n dfs_dict['store.csv'] = \\\n clean_store_csv(dfs_dict['store.csv'])\n dfs_dict['weather.csv'] = \\\n clean_weather_csv(dfs_dict['weather.csv'])\n\n # Run generic 'clean_other_dfs' function on the other dataframes\n dfs_dict['state_names.csv'] = \\\n clean_other_dfs(dfs_dict['state_names.csv'])\n dfs_dict['store_states.csv'] = \\\n clean_other_dfs(dfs_dict['store_states.csv'])\n dfs_dict['train.csv'] = \\\n clean_other_dfs(dfs_dict['train.csv'])\n\n # Start by merging store_states and state_names\n df = dfs_dict['store_states.csv'].merge(dfs_dict['state_names.csv'],\n on='state')\n # Add in weather\n df = df.merge(dfs_dict['weather.csv'],\n left_on='state_name', right_on='file')\n\n # Drop file and state_name - they are colinear with 'state'\n df.drop(['file', 'state_name'], axis='columns', inplace=True)\n\n # Add in store\n df = df.merge(dfs_dict['store.csv'], on='store')\n\n # Add in train - note that since train.csv has some missing dates, where\n # the store was apparently closed, we use 'outer' to capture all the dates\n df = df.merge(dfs_dict['train.csv'], on=['date', 'store'], how='outer')\n\n # Add in googletrend, making sure to coerce 'date' to datetime first\n df['date'] = pd.to_datetime(df['date'])\n df = df.merge(dfs_dict['googletrend.csv'], on=['date', 'state'])\n\n # final cleanup\n df.loc[df.open.isnull(), 'open'] = 0\n df.loc[df.sales.isnull(), 'sales'] = 0\n df.loc[df.customers.isnull(), 'customers'] = 0\n df.loc[df.promo.isnull(), 'promo'] = 0\n df.loc[df.school_holiday.isnull(), 'school_holiday'] = 0\n df.loc[df.state_holiday.isnull(), 'state_holiday'] = '0'\n df['day_of_week'] = df.date.dt.dayofweek\n df.loc[df.customers == 0, 'open'] = 0\n df['date'] = pd.to_datetime(df['date'])\n df['week_start'] = pd.to_datetime(df['week_start'])\n df.loc[df.open == 0, 'promo'] = 0\n\n new_dict = {}\n for k, v in dfs_dict.items():\n new_dict[k] = v\n return (df, new_dict)", "def load_data(params):\n train_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_train_' + params['stemming'] + '.csv']))\n dev_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_dev_' + params['stemming'] + '.csv']))\n train_data, label_encode = data_prep(train_df, params, if_resample=True)\n dev_data, _ = data_prep(dev_df, params)\n return train_data, dev_data, label_encode", "def prepare_dataset_encoder(self):\n calendar, 
sales_train, prices_df = self.calendar, self.sales_train, self.prices_df\n agg_endog, agg_idx, agg_sales_train = self.agg_endog, self.agg_idx, self.agg_sales_train\n \n #Prepare exog dataset ---------------------------------------------------------------\n #Prepare calendar exog: event_type & wday on a date\n calendar_exog = pd.DataFrame(index=calendar.index)\n for event_type in ['Sporting', 'Cultural', 'National', 'Religious']:\n calendar_exog['is_{}'.format(event_type)] = np.where((calendar.loc[calendar_exog.index, ['event_type_1', 'event_type_2']] == event_type).any(axis=1), 1, 0)\n wday_encoder = OneHotEncoder(drop='first', sparse=False) #drop Sat.\n wday_df = pd.DataFrame(wday_encoder.fit_transform(calendar.loc[calendar_exog.index, ['wday']]), columns=['w7'] + ['w{}'.format(i) for i in range(1,6)])\n calendar_exog = pd.concat([calendar_exog, wday_df], axis=1)\n \n #Prepare snap_exog: if there is snap event on that date & dept_store ts\n snap_exog = pd.DataFrame(0., index=calendar.index, columns=agg_endog.columns)\n for idx in snap_exog.columns:\n state = sales_train[agg_idx == idx].state_id.unique()[0]\n snap_exog[idx] = calendar.loc[snap_exog.index, 'snap_{}'.format(state)]\n \n #Prepare price discount on that date & dept_store ts\n price_exog = pd.DataFrame(index=calendar.index, columns=agg_endog.columns) #mean price across item_store for a dept_store ts\n for idx in price_exog.columns:\n price_exog[idx] = prices_df.T.loc[agg_idx == idx].mean()\n price_discount = price_exog / price_exog.max() #normalized\n \n self.calendar_exog = calendar_exog\n self.snap_exog = snap_exog\n self.price_discount = price_discount\n \n #Prepare encoder ----------------------------------------------------------------------\n #Create encoder for dept_store_id\n dept_store_encoder = OneHotEncoder(drop='first', sparse=False).fit(agg_sales_train[['dept_id', 'store_id']])\n \n #Create encoder for event name\n calendar['event_name_1'].fillna('missing', inplace=True)\n event_encoder = LabelEncoder().fit(calendar['event_name_1'])\n \n self.dept_store_encoder = dept_store_encoder\n self.event_encoder = event_encoder", "def join(upstream, product):\n a = pd.read_parquet(str(upstream[\"get\"]))\n b = pd.read_parquet(str(upstream[\"features\"]))\n df = a.join(b)\n df.to_parquet(str(product))", "def main(targets):\n # Parse through the datasets and select only relevant columns\n cpu_df = data_exploration.parse_cpu_data(\"data/raw/hw_metric_histo.csv000\")\n sys_df = data_exploration.parse_sys_data(\"data/raw/system_sysinfo_unique_normalized.csv000\")\n\n # Create a new reference to the optimized DataFrame\n optimized_df = data_exploration.optimize_dataframe(cpu_df)\n\n # grab the specific column \"HW::CORE:C0:PERCENT\" as a feature\n cpu = data_exploration.get_stats(optimized_df, \"name\", \"HW::CORE:C0:PERCENT:\")\n\n # grab the specific column \"HW::CORE:TEMPERATURE:CENTIGRADE\" as a feature\n temp = data_exploration.get_stats(optimized_df, \"name\", \"HW::CORE:TEMPERATURE:CENTIGRADE:\")\n\n # grab the GUIDs from each dataset and put them into lists\n sys_guid = data_exploration.get_guid(sys_df, 'guid')\n hw_guid = data_exploration.get_guid(cpu_df, 'guid')\n\n # checking for the GUID overlap in both datasets\n syshw_overlap = [guid for guid in sys_guid if guid in hw_guid]\n\n # objective is to create a dataframe of only matching GUIDs\n hwcpu_match = data_exploration.get_cpu_guid(cpu, syshw_overlap)\n\n # only grabbing the relevant columns to be matched on\n hwtemp_match = data_exploration.get_temp_guid(temp, 
syshw_overlap)\n\n # instantiating our dataframes to be joined\n hwtemp = pd.DataFrame(hwtemp_match.groupby('guid')['temp_mean'].mean())\n hwcpu = pd.DataFrame(hwcpu_match.groupby('guid')['utilization_mean'].mean())\n\n # joining our matched dataframes together, only using relevant columns\n combined = sys_df.join(hwcpu, on=['guid'], how='left')\n combined = combined.join(hwtemp, on=['guid'], how='left')\n combined = combined.drop(columns=['guid', 'model_normalized', \"processornumber\"])\n\n # create copy of our joined dataframe to be used for modelling\n feature_columns = combined.copy()\n\n # selecting only relevant columns to use for features\n feature_columns = feature_columns[['os','cpu_family', 'cpuvendor',\n 'graphicscardclass', 'persona']]\n\n # creating a completely one-hot encoded dataframe only containing relevant columns\n dummy = pd.get_dummies(feature_columns)\n\n # converting our categorical variables to be predicted on into numerical values\n cleanup_nums = {'persona': {'Web User': 0, 'Casual User': 1, 'Gamer':2, 'Casual Gamer': 3,\n 'Office/Productivity':4, 'Content Creator/IT': 5,\n 'Communication': 6, 'Win Store App User': 7, 'Entertainment': 8,\n 'File & Network Sharer':9, 'Unknown': 10}}\n\n # replacing the values in the column 'persona' to be numerical\n encode_persona = combined['persona'].to_frame().replace(cleanup_nums)\n\n # putting our old means back into the dummy dataframe\n dummy['util_mean'] = combined['utilization_mean']\n dummy['temp_mean'] = combined['temp_mean']\n # dummy = dummy.drop(columns=['persona'])\n dummy['persona'] = encode_persona['persona']\n\n dummy = dummy.dropna()\n nona_test = dummy.copy()\n\n # we want to predict on Y\n Y = nona_test['persona']\n X = nona_test.drop(columns=['persona'])\n\n # creating our test/train split\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n # all the models we are going to use\n names = [\"Nearest_Neighbors\", \"Linear_SVM\", \"Polynomial_SVM\", \"RBF_SVM\", \"Gradient_Boosting\"]\n\n # all of our predictors scaled to the degree of our datasets\n classifiers = [KNeighborsClassifier(3),\n SVC(kernel=\"linear\", C=0.025),\n SVC(kernel=\"poly\", degree=3, C=0.025),\n SVC(kernel=\"rbf\", C=1, gamma=2),\n GradientBoostingClassifier(n_estimators=100, learning_rate=1.0)]\n\n scores = []\n # we write in our accuracy scores to [scores]\n for name, clf in zip(names, classifiers):\n clf.fit(X_train, Y_train)\n score = clf.score(X_test, Y_test)\n scores.append(score)\n\n show = data_exploration.get_model_scores(names, scores)\n model_scores = data_exploration.plot_graphical_model_scores(show)", "def get_ensemble_merged_data(self) -> Dict[str, np.ndarray]:\n\n data = {k: v.copy() for k, v in self.data.items()} # deep copy\n\n if self.ensemble_results.empty(): # no ensemble data available\n return data\n\n train_scores, test_scores = self.ensemble_results.train_scores, self.ensemble_results.test_scores\n end_times = self.ensemble_results.end_times\n cur, timestep_size, sign = 0, self.cum_times.size, self.metric._sign\n key_train, key_test = f'ensemble::train::{self.metric.name}', f'ensemble::test::{self.metric.name}'\n\n all_test_perfs_null = all([perf is None for perf in test_scores])\n\n train_perfs = np.full_like(self.cum_times, self.metric._worst_possible_result)\n test_perfs = np.full_like(self.cum_times, self.metric._worst_possible_result)\n\n for timestamp, train_score, test_score in zip(end_times, train_scores, test_scores):\n avail_time = timestamp - self.start_time\n while cur < 
timestep_size and self.cum_times[cur] < avail_time:\n # Guarantee that cum_times[cur] >= avail_time\n cur += 1\n\n # results[cur] is the closest available checkpoint after or at the avail_time\n # ==> Assign this data to that checkpoint\n time_index = min(cur, timestep_size - 1)\n # If there already exists a previous allocated value, update by a better value\n train_perfs[time_index] = sign * max(sign * train_perfs[time_index], sign * train_score)\n # test_perfs can be none when X_test is not passed\n if not all_test_perfs_null:\n test_perfs[time_index] = sign * max(sign * test_perfs[time_index], sign * test_score)\n\n update_dict = {key_train: train_perfs}\n if not all_test_perfs_null:\n update_dict[key_test] = test_perfs\n\n data.update(update_dict)\n\n return data", "def gen_main_df(add_list: list):\r\n # 由Bert 计算得来的 sentiment信息\r\n if 'sentiment' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sentiment')\r\n sentiment = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'daily_svm_sentiment_6class' , 'csv')[0],\r\n 'date', ['0'], 'sentiment') # 'daily_svm_sentiment_2class' '0', '1', '2', '3', '4', '5'\r\n data_manipulator.add_column(sentiment)\r\n # 中国CPI指数\r\n if 'cpi' in add_list and 'cpi' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('cpi')\r\n cpi = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'CPI', 'csv')[0],\r\n '日期', ['最新值', '涨跌幅', '近3月涨跌幅'], 'CPI')\r\n data_manipulator.add_column(cpi)\r\n # 上海银行间同业拆放利率\r\n if 'shibor' in add_list and 'shibor' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shibor')\r\n shibor = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'shibor', 'csv')[0],\r\n 'date', ['on', '1w', '2w', '1m', '3m'], 'Shibor')\r\n data_manipulator.add_column(shibor)\r\n # 上证综指\r\n if 'shangzheng' in add_list and 'shangzheng' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shangzheng')\r\n shangzheng = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng', 'csv')[0],\r\n 'trade_date', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount',\r\n 'total_mv', 'float_mv', 'total_share', 'float_share',\r\n 'free_share', 'turnover_rate', 'turnover_rate_f', 'pe',\r\n 'pe_ttm', 'pb'],\r\n 'ShangZheng')\r\n data_manipulator.add_column(shangzheng)\r\n data_manipulator.shift_columns(['ShangZheng_pct_chg'], (-1,),\r\n add=True) # name has changed to shift-1_ShangZheng_pct_chg\r\n data_manipulator.rank_df_column(['shift-1_ShangZheng_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n shangzheng_30min = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng_index_30min', 'csv')[0],\r\n 'trade_time', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount'],\r\n 'ShangZheng_30min')\r\n data_manipulator.news_df_add_column(shangzheng_30min)\r\n data_manipulator.shift_minute_columns(['ShangZheng_30min_pct_chg'], (-1,),\r\n add=True)\r\n data_manipulator.rank_minute_df_columns(['shift-1_ShangZheng_30min_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n\r\n # M2 广义货币量\r\n if 'm2' in add_list and 'm2' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('m2')\r\n m2 = 
data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'M2', 'csv')[0],\r\n '月份', ['M2数量(亿元)', 'M2同比增长', 'M2环比增长'], 'M2')\r\n m2 = data_manipulator.complement_df(m2, 'date')\r\n data_manipulator.add_column(m2)\r\n\r\n # 人民币美元汇率\r\n if 'rmb_usd' in add_list and 'rmb_usd' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('rmb_usd')\r\n rmb_usd = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'RMB_USD', 'csv')[0],\r\n 'trade_date',\r\n ['bid_open', 'bid_close', 'bid_high', 'bid_low', 'ask_open',\r\n 'ask_close', 'ask_high', 'ask_low', 'tick_qty'], 'exchange')\r\n data_manipulator.add_column(rmb_usd)\r\n\r\n # 沪港通 沪深通 到岸 离岸资金流\r\n if 'fund_flow' in add_list and 'fund_flow' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('fund_flow')\r\n fund_flow = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'fund_flow', 'csv')[0],\r\n 'trade_date', ['north_money', 'south_money'], 'fund_flow')\r\n data_manipulator.add_column(fund_flow)\r\n\r\n # 债券回购日行情\r\n if 'repo' in add_list and 'repo' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('repo')\r\n repo = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'repo', 'csv')[0],\r\n 'trade_date', ['repo_maturity', 'open', 'high', 'low', 'close',\r\n 'amount'], 'repo', data_manipulator.cut_time_string,\r\n (0, 10,))\r\n repo = data_manipulator.select_col_group_by(repo, 'repo_repo_maturity', ['GC001', 'GC007', 'GC014', 'GC028'],\r\n 'date')\r\n data_manipulator.add_column(repo)\r\n\r\n # 新浪新闻\r\n if 'sina_news' in add_list and 'sina_news' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sina_news')\r\n columns_type = {'create_time': str, 'text': str}\r\n sina_news = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'sina', 'csv')[0],\r\n 'create_time', ['text', ], 'sina', dtypes=columns_type)\r\n data_manipulator.add_change_news('sina', (7, 9), columns_type, sina_news, time_col_name='create_time')\r\n data_manipulator.add_minute_change_news('sina', columns_type, sina_news, time_col_name='create_time')\r\n if 'scale' in add_list:\r\n data_manipulator.scaling_col()\r\n if 'clear' in add_list:\r\n data_manipulator.clear()", "def make_dataset(self):\n # Read raw data\n data = self.read_raw_data()\n self.default_header = list(data.columns.values)\n # Fit the variables on the raw dataset\n self.fit(data.copy())\n return make_df(data, self.features), make_df(data, self.targets)", "def _write_to_dataset(parser1, parser2, dset, rundate):\n\n data_all1 = parser1.as_dict()\n data_all2 = parser2.as_dict()\n if parser1.file_path == parser2.file_path:\n collection = [data_all1]\n else:\n collection = [data_all1, data_all2]\n\n # Meta information\n dset.meta[\"tech\"] = \"slr\"\n dset.meta.add(\"file\", parser1.file_path.stem, section=\"input\")\n dset.meta.add(\"file\", parser2.file_path.stem, section=\"input\")\n dset.meta.add(\"type\", config.tech.obs_format.str.upper(), section=\"input\")\n\n # Make new dict \"obs_data\" containing only data in relevant time interval:\n arc_length = config.tech.arc_length.float\n rundate_datetime = datetime(rundate.year, rundate.month, rundate.day)\n obs_data = dict()\n for data_all in collection:\n for i, x in enumerate(data_all[\"meta\"][\"obs_time\"]):\n if rundate_datetime <= x < rundate_datetime + 
timedelta(days=arc_length):\n for key in (\"meta\", \"obs\", \"obs_str\"):\n for field, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(field, list()).append(val[i])\n\n data_all.pop(\"meta\")\n data_all.pop(\"obs\")\n data_all.pop(\"obs_str\")\n\n for key in data_all.keys():\n if key.startswith(\"met_\"):\n for key2, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(key2, list()).append(val)\n elif key.startswith(\"satellite_\"):\n # TODO: Use this information in the future?\n continue\n elif key.startswith(\"station_\"):\n # TODO: Use this information in the future?\n continue\n else:\n log.fatal(f\"Unknown data type{key}\")\n\n obs_date = obs_data[\"meta\"][\"obs_date\"]\n time = [obs_date[i] + timedelta(seconds=obs_data[\"meta\"][\"obs_sec\"][i]) for i in range(0, len(obs_date))]\n dset.num_obs = len(obs_data[\"meta\"][\"obs_time\"])\n dset.add_time(\"time\", val=time, scale=\"utc\", fmt=\"datetime\")\n dset.add_text(val=obs_data[\"meta\"][\"station\"], name=\"station\")\n dset.add_text(val=obs_data[\"meta\"][\"satellite\"], name=\"satellite\")\n dset.add_float(val=obs_data[\"meta\"][\"bin_rms\"], unit=\"picoseconds\", name=\"bin_rms\")\n # Positions\n trf = apriori.get(\"trf\", time=dset.time)\n for station in dset.unique(\"station\"):\n trf_site = trf[station]\n station_pos = trf_site.pos.trs.val\n log.debug(f\"Station position for {station} ({trf_site.name}) is (x,y,z) = {station_pos.mean(axis=0)}\")\n domes = trf_site.meta[\"domes\"]\n obs_data[\"pos_\" + station] = station_pos\n obs_data[\"station-other_\" + station] = dict(domes=domes, cdp=station, site_id=station)\n dset.add_position(\n \"site_pos\",\n time=dset.time,\n system=\"trs\",\n val=np.array([obs_data[\"pos_\" + s][idx] for idx, s in enumerate(dset.station)]),\n )\n # Station data\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station_\")])\n for field in sta_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"station_\" + s][field]) for s in dset.station]))\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n for field in sta_fields:\n dset.add_text(field, val=[obs_data[\"station-other_\" + s][field] for s in dset.station])\n\n # Station meta\n station_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n pos_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"pos_\")])\n\n for sta_key, pos_key in zip(station_keys, pos_keys):\n sta_name = sta_key.replace(\"station-other_\", \"\")\n cdp = obs_data[sta_key][\"cdp\"]\n dset.meta.add(sta_name, \"site_id\", cdp)\n longitude, latitude, height, _ = sofa.iau_gc2gd(2, obs_data[pos_key][0, :]) # TODO: Reference ellipsoid\n dset.meta[\"station\"].setdefault(sta_name, {})[\"cdp\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"site_id\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"domes\"] = obs_data[sta_key][\"domes\"]\n dset.meta[\"station\"].setdefault(sta_name, {})[\"marker\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"description\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"longitude\"] = longitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"latitude\"] = latitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"height\"] = height\n\n # Satellite data\n sat_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"satellite_\")])\n for field in sat_fields:\n dset.add_float(field, 
val=np.array([float(obs_data[\"satellite_\" + s][field]) for s in dset.satellite]))\n\n # Observations\n # In the dataset, obs_time is seconds since rundate:\n v = [\n (obs_data[\"meta\"][\"obs_date\"][i] - rundate_datetime).total_seconds() + obs_data[\"meta\"][\"obs_sec\"][i]\n for i in range(0, dset.num_obs)\n ]\n\n obs_data[\"obs\"].pop(\"obs_time\")\n dset.add_float(\"obs_time\", val=v)\n for field, values in obs_data[\"obs\"].items():\n dset.add_float(field, val=np.array(values))\n\n for field, values in obs_data[\"obs_str\"].items():\n dset.add_text(field, val=values)\n\n return obs_data", "def join_survey_data(survey, deezer):\n\n\n df = survey.rename(columns={'Age': 'user_age', 'Gender': 'user_gender',\n 'deezer_id': 'media_id'})\n\n for index, row in df.iterrows():\n if pd.isnull(row['time']):\n continue\n time = row['time'].split(',')\n if row['user_gender'] == 'Male':\n user_gender = 1\n else:\n user_gender = 0\n if time == None:\n if row['rating'] == 0:\n for i in [1480513129, 1479067262, 1478675619]:\n new = pd.DataFrame(np.array([[999999, i, row['media_id'],\n 999999, 0, 20001010, 1, 0,\n 999, 1, user_gender,\n row['user_id'], None,\n row['user_age'], 0]]),\n columns=['genre_id', 'ts_listen',\n 'media_id', 'album_id',\n 'context_type',\n 'release_date',\n 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id',\n 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n elif 'Anytime' in time:\n for i in [1480513129, 1479067262, 1478675619]:\n new = pd.DataFrame(np.array([[999999, i, row['media_id'],\n 999999, 0, 20001010, 1, 0, 999,\n 1, user_gender,\n row['user_id'], None,\n row['user_age'], 0]]),\n columns=['genre_id', 'ts_listen',\n 'media_id', 'album_id',\n 'context_type',\n 'release_date', 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id', 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n else:\n t_dict = {'Morning': 0, 'Afternoon': 0, 'Evening': 0}\n for t in time:\n t_dict[t] = 1\n for i in [('Morning', 1480513129), ('Afternoon', 1479067262),\n ('Evening', 1478675619)]:\n new = pd.DataFrame(np.array([[999999, i[1], row['media_id'],\n 999999, 0, 20001010, 1, 0, 999,\n 1, user_gender,\n row['user_id'], None,\n row['user_age'], t_dict[i[0]]]]),\n columns=['genre_id',\n 'ts_listen',\n 'media_id',\n 'album_id',\n 'context_type',\n 'release_date', 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id', 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n\n return deezer", "def merge_tables():\r\n\r\n # get sql connection\r\n conn = get_sql_conn()\r\n\r\n # get all info from materials table\r\n query_mat = 'Select * from material_procurement'\r\n df_mat = pd.read_sql_query(query_mat, con=conn)\r\n df_mat = df_mat.drop(['uid'], axis=1)\r\n df_mat = df_mat.pivot(index='ball_milling_uid',\r\n columns='material_name',\r\n values='mass_fraction')\r\n df_mat = df_mat.reset_index()\r\n df_mat = df_mat.add_prefix('MT-')\r\n\r\n # get all info from ball mill table\r\n query_ball = 'Select * from ball_milling'\r\n df_ball = pd.read_sql_query(query_ball, con=conn)\r\n\r\n # added prefix to distinctly identify a column\r\n df_ball = df_ball.add_prefix('BM-')\r\n\r\n # get all info from hot process\r\n query_hot = 'Select * from hot_press'\r\n df_hot = pd.read_sql_query(query_hot, con=conn)\r\n\r\n # added prefix to distinctly identify a column\r\n df_hot = 
df_hot.add_prefix('HP-')\r\n\r\n # get all info from hall measurements table\r\n query_hall = 'Select * from hall_measurement'\r\n df_hall = pd.read_sql_query(query_hall, con=conn)\r\n\r\n # get all info from icp measurements table\r\n query_icp = 'Select * from icp_measurement'\r\n df_icp = pd.read_sql_query(query_icp, con=conn)\r\n\r\n # Left merge tables in database starting from materials area to lab reports\r\n df_com = df_ball.merge(df_mat, how='left', left_on='BM-uid',\r\n right_on='MT-ball_milling_uid')\r\n df_com = df_com.merge(df_hot, how='left', left_on='BM-hot_press_uid'\r\n , right_on='HP-uid')\r\n df_com = df_com.merge(df_hall.add_prefix('BM-HA-'), how='left',\r\n left_on='BM-output_material_uid',\r\n right_on='BM-HA-material_uid')\r\n df_com = df_com.merge(df_icp.add_prefix('BM-ICP-'), how='left',\r\n left_on='BM-output_material_uid',\r\n right_on='BM-ICP-material_uid')\r\n df_com = df_com.merge(df_hall.add_prefix('HP-HA-'), how='left',\r\n left_on='HP-output_material_uid',\r\n right_on='HP-HA-material_uid')\r\n df_com = df_com.merge(df_icp.add_prefix('HP-ICP-'), how='left',\r\n left_on='HP-output_material_uid',\r\n right_on='HP-ICP-material_uid')\r\n\r\n # close connection\r\n conn.close()\r\n\r\n # return complete db tables\r\n return df_com", "def join_daily_cweeds_wy2_and_wy3(wy2_df, wy3_df):\n assert wy2_df['CWEEDS Format'] == 'WY2'\n assert wy3_df['CWEEDS Format'] == 'WY3'\n assert wy2_df['Time Format'] == wy3_df['Time Format']\n\n time_wy23 = np.hstack([wy2_df['Time'], wy3_df['Time']])\n time_wy23 = np.unique(time_wy23)\n time_wy23 = np.sort(time_wy23)\n\n wy23_df = {}\n wy23_df['Time Format'] = wy3_df['Time Format']\n wy23_df['CWEEDS Format'] = 'WY2+WY3'\n\n # Copy the header info from WY3 dataset :\n\n for key in ['HORZ version', 'Location', 'Province', 'Country',\n 'Station ID', 'Latitude', 'Longitude', 'Time Zone',\n 'Elevation']:\n wy23_df[key] = wy3_df[key]\n\n # Merge the two datasets :\n\n wy23_df['Time'] = time_wy23\n wy23_df['Years'] = np.empty(len(time_wy23)).astype(int)\n wy23_df['Months'] = np.empty(len(time_wy23)).astype(int)\n wy23_df['Days'] = np.empty(len(time_wy23)).astype(int)\n wy23_df['Hours'] = np.empty(len(time_wy23)).astype(int)\n wy23_df['Irradiance'] = np.empty(len(time_wy23)).astype('float64')\n\n for dataset in [wy2_df, wy3_df]:\n indexes = np.digitize(dataset['Time'], time_wy23, right=True)\n for key in ['Years', 'Months', 'Days', 'Hours', 'Irradiance']:\n wy23_df[key][indexes] = dataset[key]\n\n return wy23_df", "def load_training_data(s3: str = \"s3://epam-hack4med-dataset\") -> pd.DataFrame:\n # Load labels\n df_labels = pd.read_csv(f\"{s3}/CRACoV-ETYKIETY.csv\")\n df_labels[id_cols] = df_labels[id_cols].astype(int)\n df_labels = df_labels.set_index(id_cols)\n labels = df_labels[[basic_target]]\n idx = labels.index\n\n # Load hospital admission file (PRZYJECIE)\n df_admission = pd.read_csv(f\"{s3}/CRACoV-PRZYJECIE.csv\")\n binary_adm_vars = [x for x in basic_adm_vars if df_admission[x].isin([\"Tak\", \"Nie\"]).any()]\n other_adm_vars = [x for x in basic_adm_vars if x not in binary_adm_vars]\n adm = df_admission.copy()\n adm = adm[id_cols + binary_adm_vars + other_adm_vars]\n adm = adm.set_index(id_cols).reindex(idx)\n \n # Load biochem analyses\n biochem_raw = pd.read_csv(f\"{s3}/CRACoV-BIOCHEMIA.csv\", parse_dates=['DATA_WYK']).sort_values('DATA_WYK')\n biochem = (\n biochem_raw.loc[biochem_raw.KOD.isin(basic_bio_codes)]\n .pivot_table(index=['LP.', 'ID_LAB'], columns='KOD', values='WYNIK', aggfunc='first')\n .reindex(idx)\n )\n # 
Merge it all together\n Xy_raw = pd.concat([labels, adm, biochem], axis='columns')\n return Xy_raw", "def set_training_data(self):\n # Optional training data period\n # TODO: add training data period feature to training data query\n if not self.training_period == None:\n training_period_date = (datetime.datetime.utcnow() - timedelta(minutes=self.training_period)).strftime(\"%Y-%m-%d\")\n print(f\"Training data start date: {training_period_date}\")\n # Extract queried data from Athena\n #athena = athena_connect.Athena()\n #features_df = athena.pandas_read_athena(self.training_data_sql)\n with open('feature_sql.txt', 'w') as f:\n print(self.training_data_sql, file=f) \n features_df = pd.read_sql(self.training_data_sql, self.logic_db_engine())\n features_df.fillna(0, inplace=True)\n print(features_df.shape)\n features_df = features_df[max(self.feature_minutes_list):]\n print(features_df.shape)\n # Remove infinity string\n features_df.replace({'Infinity': 0}, inplace=True)\n # Convert all object fields to numeric except date fields\n object_col_list = features_df.columns[features_df.dtypes.eq('object')]\n object_col_list = [col for col in object_col_list if 'trade_date' not in col]\n features_df[object_col_list] = features_df[object_col_list].apply(pd.to_numeric, errors='coerce')\n self.training_df = features_df", "def prepare_dataset(dataset, manifest):\n\n ## importation\n import pandas as pd\n\n ## craft output_filename\n output_filename = dataset.replace(\".csv\", \"_labeled.csv\")\n\n ## load dataset\n df_data = pd.read_csv(dataset)\n df_data = df_data.set_index('ID')\n\n ## load manifest\n df_cluster = pd.read_csv(manifest)\n\n ## merge\n result = df_data.join(df_cluster.set_index('ID'))\n\n ## drop columns conatining NA\n result = result.dropna(axis='columns')\n\n ## save dataset\n result.to_csv(output_filename)", "def generate_dataset(self):\n sets = {\n \"train\": 10,\n \"test\": 5,\n }\n\n fields = {\n \"strings_list\": lambda x: str_to_ascii(self.generate_string_list(x)),\n \"data\": lambda x: np.random.randint(0, 10, (x, 10)),\n \"number\": lambda x: np.array(range(x)),\n \"field_with_a_long_name_for_printing\": lambda x: np.array(range(x)),\n }\n\n lists = {\n \"list_dummy_data\": np.array(range(10)),\n \"list_dummy_number\": np.array(range(10), dtype=np.uint8),\n }\n\n dataset = {}\n data_fields = {}\n for set_name in sets:\n dataset[set_name] = self.populate_set(sets[set_name], fields, lists)\n data_fields[set_name] = sorted(dataset[set_name].keys())\n\n return dataset, data_fields", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = 
np.array(rental_period_months)\n return data_set", "def master_dataframe(threshold):\n\t# import each of the dataframes\n\tgpa = import_gpa_data(GPA_DATA)\n\trank = import_rank_data(CLASS_RANK_DATA)\n\tweek = import_week_attn_data(WEEKLY_ATTN_DATA)\n\tyear = import_year_attn_data(YTD_ATTN_DATA)\n\tswipe = import_swipe_data(SWIPE_DATA)\n\tsat = import_sat([SAT_9, SAT_10, SAT_11])\n\temail = import_student_emails(EMAIL_LIST)\n\tcurr_grades = import_current_grades(CURRENT_GRADES)\n\tservice = import_service_learning([SL_11, SL_12])\n\tmaster_dataframe = pd.concat([email,gpa,rank,sat,week,year,swipe,curr_grades, service],axis=1)\n\t\n\t# fill late_date and late_time columns NaNs\n\tmaster_dataframe[['late_date','late_time']] = master_dataframe[['late_date', 'late_time']].fillna(value='None!')\n\t\n\t# fill in service learning for 9th and 10th graders\n\tmaster_dataframe['service_hours'] = master_dataframe['service_hours'].fillna(value=\"See Note Below for Freshman & Sophomores\")\n\n\t# add the date\n\tmaster_dataframe['start_date'] = START_DATE\n\tmaster_dataframe['end_date'] = END_DATE\n\n\t#master_dataframe[['orange_status']] = master_dataframe[['orange_status']].fillna(value=\"ARE NOT\")\n\tmaster_dataframe[['composite_sat', 'erw_sat', 'math_sat']] = master_dataframe[['composite_sat', \n\t'erw_sat', 'math_sat']].fillna(value=\"Scores coming in mid-May\")\n\t\t\n\t# drop rows missing excessive amounts of data\n\tmaster_dataframe = master_dataframe.dropna(thresh=threshold)\n\n\t# obtain separate dataframes for each grade level\n\t# nine, ten, eleven, twelve = groupby_grade(master_dataframe)\n\n\treturn master_dataframe", "def get_datasets(load_key=None, maven=False):\n ds_names = {}\n if load_key == 'R2349': \n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_names['batsrus_multi_species'] = model_dir+'R2349/batsrus_3d_multi_species.h5'\n ds_names['batsrus_electron_pressure'] = model_dir+'R2349/batsrus_3d_pe.h5'\n ds_names['heliosares'] ='/Volumes/triton/Data/ModelChallenge/R2349/heliosares_multi.h5'\n #ds_names['rhybrid'] ='/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5'\n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'multi_fluid' in key],\n 'batsrus2':[key for key in ds_names.keys() if 'multi_species' in key],\n 'batsrus3':[key for key in ds_names.keys() if 'electron_pressure' in key],\n 'batsrus4':[key for key in ds_names.keys() if 'mf_lr' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key],\n 'rhybrid_helio':[key for key in ds_names.keys() if 'rhybrid' in key ]}\n if maven or True:\n ds_names['maven']=orbit_dir+'orbit_2349.csv'\n #ds_names['maven'] = orbit_dir+'orbit_plume_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'batsrus_mf_lowres':\n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_types = {'batsrus_mf_lr' : ['batsrus_mf_lr']}\n\n\n elif load_key == 'helio_multi':\n ds_names['t00550'] = model_dir+'R2349/Heliosares_Multi/t00550.h5'\n ds_names['t00560'] = model_dir+'R2349/Heliosares_Multi/t00560.h5'\n ds_names['t00570'] = model_dir+'R2349/Heliosares_Multi/t00570.h5'\n ds_names['t00580'] = model_dir+'R2349/Heliosares_Multi/t00580.h5'\n ds_names['t00590'] = model_dir+'R2349/Heliosares_Multi/t00590.h5'\n ds_names['t00600'] = model_dir+'R2349/Heliosares_Multi/t00600.h5'\n ds_names['t00610'] = model_dir+'R2349/Heliosares_Multi/t00610.h5'\n ds_names['t00620'] = model_dir+'R2349/Heliosares_Multi/t00620.h5'\n ds_names['t00630'] = 
model_dir+'R2349/Heliosares_Multi/t00630.h5'\n ds_names['t00640'] = model_dir+'R2349/Heliosares_Multi/t00640.h5'\n ds_names['t00650'] = model_dir+'R2349/Heliosares_Multi/t00650.h5'\n\n ds_types = {'heliosares':[key for key in ds_names.keys()]}\n if maven:\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'SDC_BATS':\n ds_names['LS180_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_max.h5'\n ds_names['LS270_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_max.h5'\n ds_names['LS090_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_max.h5'\n ds_names['LS180_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_max.h5'\n ds_names['LS270_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_max.h5'\n ds_names['LS090_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_max.h5'\n ds_names['LS180_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_max.h5'\n ds_names['LS270_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_max.h5'\n ds_names['LS090_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_max.h5'\n ds_names['LS180_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_min.h5'\n ds_names['LS270_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_min.h5'\n ds_names['LS090_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_min.h5'\n ds_names['LS180_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_min.h5'\n ds_names['LS270_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_min.h5'\n ds_names['LS090_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_min.h5'\n ds_names['LS180_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_min.h5'\n ds_names['LS270_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_min.h5'\n ds_names['LS090_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_min.h5'\n\n ds_types = {'batsrus':[key for key in ds_names.keys()]}\n\n elif load_key == 'SDC_G1':\n #BATSRUS\n ds_names['bats_min_LS270_SSL0'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG0.h5'\n ds_names['bats_min_LS270_SSL180'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG180.h5'\n ds_names['bats_min_LS270_SSL270'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG270.h5' \n \n #HELIOSARES\n #ds_names['helio_1'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_1.h5'\n \n #ds_names['helio_2'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_2.h5'\n \n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'bats' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key]}\n if maven:\n pass\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n #ds_types['maven']=['maven']\n\n elif load_key == 'rhybrid_res':\n ds_names = {'rhybrid240':'/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5',\n 'rhybrid120':'/Volumes/triton/Data/ModelChallenge/R2349/HYB/state00030000.h5'}\n ds_types = {'rhybrid1':['rhybrid240'], 'rhybrid2':['rhybrid120']}\n elif load_key == 'batsrus_tseries':\n ds_names = {'batsrus_mf':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_mf/3d__ful_4_n00040000.h5',\n 'batsrus_ms':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_ms/3d__mhd_6_n0050000.h5'}\n ds_types = {'batsrus_mf':['batsrus_mf'], 'batsrus_ms':['batsrus_ms']}\n\n elif load_key == 'maven':\n ds_names, ds_types = {},{}\n ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'exo_2349':\n 
keys = ['2349_1RM_225km','2349_1RM_450km', '2349_2RM_450km',\n '2349_2RM_900km','2349_4RM_900km'] \n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonA':\n keys = ['2349_1RM_225km', '2349_2RM_450km',\n '2349_1.5RM_338km'] \n ds_names = {k:exo_dir+'/ComparisonA/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonB':\n keys = ['2349_1RM_225km', 'T0_1RM_225km', 'T1_1RM_225km', \"T2_1RM_225km\"] \n ds_names = {k:exo_dir+'/ComparisonB/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n elif load_key == 'exo_t1':\n keys = ['T1_1RM_112km', 'T1_1RM_225km', #'T1_1RM_450km',\n 'T1_2RM_225km', 'T1_2RM_450km', #'T1_2RM_900km',\n 'T1_4RM_900km']\n\n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n else:\n print('No datasets selected')\n \n\n return (ds_names, ds_types)", "def create_sample_dataframe():\n ax_readings = []\n ay_readings = []\n az_readings = []\n mx_readings = []\n my_readings = []\n mz_readings = []\n gx_readings = []\n gy_readings = []\n gz_readings = []\n activity_list = [LABELS_NAMES[0] for _ in range(SEGMENT_TIME_SIZE)]\n\n\n for _ in range(SEGMENT_TIME_SIZE):\n ax_readings.append(random.uniform(-10,10))\n ay_readings.append(random.uniform(-10,10))\n az_readings.append(random.uniform(-10,10))\n mx_readings.append(random.uniform(-10,10))\n my_readings.append(random.uniform(-10,10))\n mz_readings.append(random.uniform(-10,10))\n gx_readings.append(random.uniform(-10,10))\n gy_readings.append(random.uniform(-10,10))\n gz_readings.append(random.uniform(-10,10))\n\n data_dict = {\n COLUMN_NAMES[0]: activity_list, COLUMN_NAMES[1]: ax_readings,\n COLUMN_NAMES[2]: ay_readings, COLUMN_NAMES[3]: az_readings,\n COLUMN_NAMES[4]: gx_readings, COLUMN_NAMES[5]: gy_readings,\n COLUMN_NAMES[6]: gz_readings, COLUMN_NAMES[7]: mx_readings,\n COLUMN_NAMES[8]: my_readings, COLUMN_NAMES[9]: mz_readings\n }\n\n df = pd.DataFrame(data=data_dict)\n return df", "def prepare_data(train, test):\n # change the name of the target column\n train.rename(columns={\"revenue\": \"target\"}, inplace=True)\n # map bool values to yes and no\n train[\"Weekend\"] = train[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n test[\"Weekend\"] = test[\"Weekend\"].map({True: \"Yes\", False: \"No\"})\n # set the id col as index\n train.set_index(\"id\", inplace=True)\n test.set_index(\"id\", inplace=True)\n\n # seperate the fetures and the target\n X_train = train.drop(\"target\", axis=1).copy()\n y_train = train[\"target\"].copy()\n X_test = test.copy()\n\n # select numerical and categorical columns\n num_cols = X_train.select_dtypes(exclude=\"object\").columns.tolist()\n cat_cols = X_train.select_dtypes(include=\"object\").columns.tolist()\n\n # numerical pipeline\n num_pipe = make_pipeline(SimpleImputer(strategy=\"mean\"))\n\n # categorical pipeline\n cat_pipe = make_pipeline(\n SimpleImputer(strategy=\"constant\", fill_value=\"NA\"),\n OneHotEncoder(handle_unknown=\"ignore\", sparse=False),\n )\n\n # full pipeline for data preprocessing\n full_pipe = ColumnTransformer(\n [(\"num\", num_pipe, num_cols), (\"cat\", cat_pipe, cat_cols)]\n )\n return X_train, y_train, X_test, full_pipe", "def main(self):\n\n assault_mech_df = self.get_mech_df(url=self.assault_url)\n heavy_mech_df = self.get_mech_df(url=self.heavy_url)\n medium_mech_df = self.get_mech_df(url=self.medium_url)\n light_mech_df = self.get_mech_df(url=self.light_url)\n all_weights_df = 
pd.concat([assault_mech_df, heavy_mech_df, medium_mech_df, \n light_mech_df])\n\n self.save_data(assault_mech_df, \"assault\")\n self.save_data(heavy_mech_df, \"heavy\")\n self.save_data(medium_mech_df, \"medium\")\n self.save_data(light_mech_df, \"light\")\n self.save_data(all_weights_df, \"all_weights\")\n #get maximum new columns needed for splitting variants\n max_cols = all_weights_df.variants.apply(lambda x: len(x)).max()\n melt_cols = []\n\n for i in range(max_cols):\n all_weights_df[\"var_\"+str(i)] = \"\"\n melt_cols.append(\"var_\"+str(i))\n\n variant_weights_df = pd.DataFrame()\n for index, row in all_weights_df.iterrows():\n for i in range(len(row[\"variants\"])):\n #add each variant to variant weights as a row with mech, tonnage, variant\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n for i in range(len(row[\"hero_chassis\"])):\n new_row_dict = {\n \"mech_name\":row[\"hero_names\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"hero_chassis\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n\n for i in range(len(row[\"special_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"special_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df]) \n\n #add champion variants by matching on \n for i in range(len(row[\"champion_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"champion_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n #remove duplicate rows \n variant_weights_df = variant_weights_df[variant_weights_df.duplicated(keep=\"first\")==False]\n self.save_data(variant_weights_df, \"variant_weights\")", "def build_all_datasets(\n cfg, tokenizer, train_valid_test_num_samples,\n):\n train_dataset = RetroQAFineTuneDataset(\n cfg.train_ds.get('file_name'),\n tokenizer,\n cfg.train_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.train_ds.get('seq_length'),\n cfg.train_ds.get('add_bos'),\n cfg.train_ds.get('add_eos'),\n train_valid_test_num_samples[0],\n cfg.train_ds.get('seed'),\n cfg.train_ds.get('neighbors'),\n )\n val_dataset = RetroQAFineTuneDataset(\n cfg.val_ds.get('file_name'),\n tokenizer,\n cfg.val_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.val_ds.get('seq_length'),\n cfg.val_ds.get('add_bos'),\n cfg.val_ds.get('add_eos'),\n train_valid_test_num_samples[1],\n cfg.val_ds.get('seed'),\n cfg.val_ds.get('neighbors'),\n )\n test_dataset = RetroQAFineTuneDataset(\n cfg.test_ds.get('file_name'),\n tokenizer,\n cfg.test_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.test_ds.get('seq_length'),\n cfg.test_ds.get('add_bos'),\n cfg.test_ds.get('add_eos'),\n train_valid_test_num_samples[2],\n cfg.test_ds.get('seed'),\n cfg.test_ds.get('neighbors'),\n )\n\n return train_dataset, val_dataset, test_dataset", "def get_data(data_basename: str = f'{data_folder}/data.csv') -> pd.DataFrame:\n data_path = file_path_relative(data_basename)\n if exists(data_path):\n logger.info(f'reading data from {data_path}')\n moon_data = pd.read_csv(data_path)\n return 
moon_data\n\n res = requests.get(data_url)\n soup = BeautifulSoup(res.content, features='html.parser')\n\n # get second table from wikipedia\n moon_table = soup.findAll('table', {'class': 'wikitable'})[1]\n # convert to dataframe\n moon_df = pd.read_html(str(moon_table))\n moon_df = pd.DataFrame(moon_df[0])\n\n # sanitize column names\n moon_df.columns = [_sanitize_column_name(\n col) for col in moon_df.columns.values.tolist()]\n\n # sanitize orbital period\n moon_df[orbital_period_column] = moon_df[orbital_period_column].str.replace(\n brackets_remove_regex, '').str.replace('−', '-').str.strip()\n moon_df[orbital_period_column] = pd.to_numeric(\n moon_df[orbital_period_column])\n # days to seconds\n moon_df[orbital_period_column] *= (24 * 60 * 60)\n\n # sanitize semi-major axis\n moon_df[semimajor_axis_column] = moon_df[semimajor_axis_column].str.replace(\n brackets_remove_regex, '').str.strip()\n moon_df[semimajor_axis_column] = pd.to_numeric(\n moon_df[semimajor_axis_column])\n # km to m\n moon_df[semimajor_axis_column] *= 1000\n\n # sanitize mass and sort by it\n mass_column_key: str = 'mass'\n moon_df[mass_column_key] = moon_df[mass_column_key].str.replace(\n '≈', '').str.strip()\n moon_df[mass_column_key] = pd.to_numeric(moon_df[mass_column_key])\n # to kg\n moon_df[mass_column_key] *= 1e16\n moon_df = moon_df.sort_values(by=[mass_column_key], ascending=False)\n\n moon_df.to_csv(data_path, index=False)\n return moon_df", "def study_data(study):\n\n features = ['Test ID',\n 'Patient',\n 'Protocol',\n 'Tissue',\n 'Gender',\n 'Age (years)',\n 'Load (g)',\n 'Summary',\n 'Data']\n\n crushes = pd.DataFrame(columns=features)\n crush_pattern = re.compile(r\"(?P<protocol>\\w+)-\"\n r\"(?P<load>\\d+.?\\d*)g\"\n r\"-?\\d*.csv\")\n for test in study.index:\n path = PATH / study.loc[test, 'Folder Name']\n files = [path / file for file in os.listdir(path)]\n\n # Read all patient crush data and add to dataframe\n for file in files:\n crush_match = crush_pattern.match(file.name)\n if not crush_match:\n continue\n\n # Read and set index to timestamp\n data = pd.read_csv(file)\n data['Timestamp (s)'] = pd.to_timedelta(data['Timestamp (s)'],\n unit='s')\n data = data.set_index('Timestamp (s)')\n\n # Parse meta data and append to end of crushes\n crush_dict = {\n 'Test ID': test,\n 'Patient': study.loc[test, 'Patient Code'].upper(),\n 'Protocol': crush_match.group('protocol').upper(),\n 'Tissue': study.loc[test, 'Classification'].upper(),\n 'Gender': study.loc[test, 'Gender'].upper(),\n 'Age (years)': int((study.loc[test, 'Procedure Date'] -\n study.loc[test, 'DOB']).days) / 365,\n 'Load (g)': int(float(crush_match.group('load'))),\n 'Data': data}\n crush_dict['Summary'] = \"Patient {} ({}), {} crush at {}g\".format(\n crush_dict['Patient'],\n crush_dict['Tissue'],\n crush_dict['Protocol'],\n crush_dict['Load (g)'])\n crushes = crushes.append(crush_dict, ignore_index=True)\n\n types = {'Age (years)': np.float64,\n 'Load (g)': np.int64}\n crushes = crushes.astype(types)\n crushes.index.name = 'Crush'\n return crushes", "def create_metadata(data_dir):\n pool = multiprocessing.Pool()\n raw_feature_paths = [os.path.join(data_dir, \"features.jsonl\")]\n records = list(pool.imap(read_metadata_record, raw_feature_iterator(raw_feature_paths)))\n records = [dict(record, **{\"subset\": \"train\"}) for record in records]\n\n metadf = pd.DataFrame(records)[[\"sha256\", \"appeared\", \"subset\", \"label\"]]\n metadf.to_csv(os.path.join(data_dir, \"metadata.csv\"))\n print(\"\\n[Done] create_metadata\\n\")\n \n 
return metadf", "def gatherStationData():\n flist = list_files()\n station_dics = {}\n print(\"Reading in csv data...\")\n for f_in in flist:\n start,end = find_timespan(f_in)\n station = station_name(f=f_in)\n print(\"File: {0} Station: {1} {2}--{3}\".format(f_in, \n station, start, end))\n station_dics[station] = read_precip(fname=f_in, \n label=station, start_year=start, end_year=end)\n data_list = []\n for s in station_dics:\n data_list.append(station_dics[s]) \n return pd.concat(data_list,axis=1)", "def fetch_dataset(data_root_dir):\n pattern = \"winemag_dataset_*.csv\"\n\n file_list = glob.glob(os.path.join(data_root_dir, pattern))\n\n df_list = [pd.read_csv(fname) for fname in file_list]\n\n full_df = pd.concat(df_list)\n\n # give unique row names to all\n full_df.index = range(full_df.shape[0])\n\n print(\"Dataset fetched.\")\n return full_df", "def get_main_dataset(self) -> pd.DataFrame:\n pass", "def location_specific_to_dataset(model_run):\n # for every transmission technology, we extract distance information, if it\n # is available\n data_dict = dict()\n\n data_dict[\"distance\"] = dict(\n dims=\"loc_techs_transmission\",\n data=[\n model_run.get_key(\n \"locations.{loc_from}.links.{loc_to}.techs.{tech}.distance\".format(\n **split_loc_techs_transmission(loc_tech)\n ),\n np.nan,\n )\n for loc_tech in model_run.sets[\"loc_techs_transmission\"]\n ],\n )\n # If there is no distance information stored, distance array is deleted\n if data_dict[\"distance\"][\"data\"].count(np.nan) == len(\n data_dict[\"distance\"][\"data\"]\n ):\n del data_dict[\"distance\"]\n\n data_dict[\"lookup_remotes\"] = dict(\n dims=\"loc_techs_transmission\",\n data=concat_iterable(\n [\n (k[\"loc_to\"], k[\"tech\"], k[\"loc_from\"])\n for k in [\n split_loc_techs_transmission(loc_tech)\n for loc_tech in model_run.sets[\"loc_techs_transmission\"]\n ]\n ],\n [\"::\", \":\"],\n ),\n )\n # If there are no remote locations stored, lookup_remotes array is deleted\n if data_dict[\"lookup_remotes\"][\"data\"].count(np.nan) == len(\n data_dict[\"lookup_remotes\"][\"data\"]\n ):\n del data_dict[\"lookup_remotes\"]\n\n data_dict[\"available_area\"] = dict(\n dims=\"locs\",\n data=[\n model_run.locations[loc].get(\"available_area\", np.nan)\n for loc in model_run.sets[\"locs\"]\n ],\n )\n\n # remove this dictionary element if nothing is defined in it\n if set(data_dict[\"available_area\"][\"data\"]) == {np.nan}:\n del data_dict[\"available_area\"]\n\n # Coordinates are defined per location, but may not be defined at all for\n # the model\n if \"coordinates\" in model_run.sets:\n data_dict[\"loc_coordinates\"] = dict(dims=[\"locs\", \"coordinates\"], data=[])\n for loc in model_run.sets[\"locs\"]:\n data_dict[\"loc_coordinates\"][\"data\"].append(\n [\n model_run.locations[loc].coordinates[coordinate]\n for coordinate in model_run.sets.coordinates\n ]\n )\n\n return data_dict", "def join_columns(self, other: \"MultiRegionTimeseriesDataset\") -> \"MultiRegionTimeseriesDataset\":\n if not other.latest_data.empty:\n raise NotImplementedError(\"No support for joining other with latest_data\")\n other_df = other.data_with_fips.set_index([CommonFields.LOCATION_ID, CommonFields.DATE])\n self_df = self.data_with_fips.set_index([CommonFields.LOCATION_ID, CommonFields.DATE])\n other_geo_columns = set(other_df.columns) & set(GEO_DATA_COLUMNS)\n other_ts_columns = (\n set(other_df.columns) - set(GEO_DATA_COLUMNS) - set(TimeseriesDataset.INDEX_FIELDS)\n )\n common_ts_columns = other_ts_columns & set(self.data_with_fips.columns)\n if 
common_ts_columns:\n # columns to be joined need to be disjoint\n raise ValueError(f\"Columns are in both dataset: {common_ts_columns}\")\n common_geo_columns = list(set(self.data_with_fips.columns) & other_geo_columns)\n # TODO(tom): fix geo columns check, no later than when self.data is changed to contain only\n # timeseries\n # self_common_geo_columns = self_df.loc[:, common_geo_columns].fillna(\"\")\n # other_common_geo_columns = other_df.loc[:, common_geo_columns].fillna(\"\")\n # try:\n # if (self_common_geo_columns != other_common_geo_columns).any(axis=None):\n # unequal_rows = (self_common_geo_columns != other_common_geo_columns).any(axis=1)\n # _log.info(\n # \"Geo data unexpectedly varies\",\n # self_rows=self_df.loc[unequal_rows, common_geo_columns],\n # other_rows=other_df.loc[unequal_rows, common_geo_columns],\n # )\n # raise ValueError(\"Geo data unexpectedly varies\")\n # except Exception:\n # _log.exception(f\"Comparing df {self_common_geo_columns} to {other_common_geo_columns}\")\n # raise\n combined_df = pd.concat([self_df, other_df[list(other_ts_columns)]], axis=1)\n return MultiRegionTimeseriesDataset.from_timeseries_df(\n combined_df.reset_index()\n ).append_latest_df(self.latest_data_with_fips.reset_index())", "def get_total_data():\n return pd.merge(compute_aggregate_load_data(), compute_aggregate_weather_data(),on=\"Date\")", "def all_data(self):\n return pd.concat([self.historic_data, self.dayahead_data])", "def data():\n print (\"&\")\n res = {}\n\t\n # Load Data\n with open(DATA_PATH_TRAIN, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t \t\n with open(DATA_PATH_TEST, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t\t\t\n with open('tasks/env/data/data.json', 'w') as outfile:\n json.dump(res, outfile)", "def creating_DataFrame(group_name,experiments,parameter):\n series = []\n keys_name = []\n for exp in experiments:\n keys_name.append('%s_%s_%s'%(group_name,parameter,exp))\n if parameter == 'II':\n series.append(experiments[exp].II)\n elif parameter == 'KI':\n series.append(experiments[exp].KI)\n elif parameter == 'Reads':\n series.append(experiments[exp].reads)\n elif parameter == 'Bias':\n series.append(experiments[exp].bias)\n \n \n fusion = pd.concat(series, axis = 1, keys= keys_name)#concatantaion of the different experiments\n \n if len(keys_name) > 1:\n \n fusion['%s_%s_mean'%(group_name,parameter)] = fusion.mean(axis = 1)\n fusion['%s_%s_stdev'%(group_name,parameter)] = fusion.std(axis = 1) \n return fusion", "def get_data_from_apis(\n api_keys: dict,\n api_params: dict,\n data_start_date: dt.date,\n data_end_date: dt.date = None,\n providers: list = None,\n save_dirpath: str = None,\n) -> pd.DataFrame:\n if providers is None:\n getters = _GETTERS\n else:\n getters = {k: v for k, v in _GETTERS.items() if k in providers}\n\n for provider in getters.keys():\n if provider not in api_keys.keys():\n raise InvalidAPIKey(f\"No API Key was provided for {provider}.\")\n if provider not in api_params.keys():\n msg = 
f\"No API requests parameters were provided for {provider}.\"\n raise InvalidAPIRequestsParams(msg)\n\n metadata_list = []\n obs_df_list = []\n # pylint: disable=invalid-name\n for provider, Getter in getters.items():\n getter = Getter(api_key=api_keys[provider])\n data, metadata = getter.get_data(\n series_params=api_params[provider][\"series_params\"],\n start_date=data_start_date,\n end_date=data_end_date,\n )\n metadata_list.append(metadata)\n obs_df_list.append(data)\n\n merged_metadata = reduce(lambda left, right: left + right, metadata_list)\n merged_data = merge_df_list_on(obs_df_list, on=\"date\")\n\n if save_dirpath is not None:\n date = dt.date.today().strftime(\"%Y%m%d\")\n dirpath = Path(ROOT_PATH) / save_dirpath / date\n create_dir_if_missing(dirpath)\n data_path = dirpath / \"raw_data.csv\"\n data.to_csv(data_path, sep=\";\", index=False, encoding=\"utf-8\")\n metadata_path = dirpath / \"metadata.yaml\"\n save_yaml(metadata, metadata_path)\n logger.success(f\"All data retrieved, cleaned and saved to {dirpath}.\")\n\n return merged_data, merged_metadata", "def join_tables(self) -> DataFrame:\n if not self.source_tables:\n raise ValueError(f\"No source tables were provided to GlueTransformer: {self}\")\n\n if len(self.source_tables) == 1:\n # extract the first value from the dictionary\n source_table = list(self.source_tables.values())[0]\n return source_table.get_dyf_and_apply_mapping().toDF()\n \n print(self.join_map.get_chain())\n table_chain = self.join_map.get_chain()\n \n print(f\"table_chain: {table_chain}\")\n \n master_table_name = table_chain[0]\n master_table = self.source_tables[master_table_name]\n #print(dir(master_table))\n print(\"master_table.get_columns()\")\n print(master_table.get_columns())\n master_df = master_table.get_dyf_and_apply_mapping().toDF()\n print(master_df.columns)\n \n absorbed_table_names = [master_table_name]\n \n for next_table_name in table_chain[1:]:\n next_table = self.source_tables[next_table_name]\n\n '''\n print(\"next_table.get_columns()\")\n print(next_table.get_columns())\n '''\n absorbed_table_names.append(next_table_name)\n \n next_df = next_table.get_dyf_and_apply_mapping().toDF()\n next_table_def = next_table.table_def\n next_table_alias = self.resolver.to_alias(*next_table_def.table_spec)\n \n absorbed_source = None\n master_alias = None\n for absorbed_table_name in absorbed_table_names:\n absorbed_table_def = self.source_tables[absorbed_table_name].table_def\n \n master_alias = self.resolver.to_alias(*absorbed_table_def.table_spec)\n # once we find the relevant JOIN, stop and process it\n if (master_alias, next_table_alias) in self.join_map:\n break\n else:\n raise ValueError(f'No link found for \"{next_alias}\"')\n \n join_spec = self.join_map.get(master_alias, next_table_alias)\n # not in base code\n if join_spec.left.table_spec == next_table_alias:\n '''\n print('\\n'.join([\n 'A',\n f'{join_spec.left.table_spec == next_table_alias}',\n f'{join_spec.left.table_spec}',\n f'{next_table_alias}',\n ]))\n '''\n next_key, master_key = join_spec.left, join_spec.right\n else:\n '''\n print('\\n'.join([\n 'B',\n f'{join_spec.left.table_spec == next_table_alias}',\n f'{join_spec.left.table_spec}',\n f'{next_table_alias}',\n ]))\n '''\n master_key, next_key = join_spec.left, join_spec.right\n\n '''\n # DEBUG\n print(f\"JOINDEBUG1 {join_spec}\")\n print(f\"{master_key}, {next_key}\")\n '''\n\n join_type = join_spec.type\n \n master_df_key = GlueTable.translate_alias(*master_key)\n next_df_key = GlueTable.translate_alias(*next_key)\n 
\n master_df_key = GlueTable.translate_alias(*master_key)\n next_df_key = GlueTable.translate_alias(*next_key)\n \n ''' \n # DEBUG\n print(f'Master table name : {master_table_name} with key {master_df_key}')\n # print(f'No. of records before join master : {master_df.count()}')\n print(master_df.columns)\n print(f'Next table name : {next_table_name} with key {next_df_key}')\n # print(f'No. of records before join next : {next_df.count()}')\n print(next_df.columns)\n \n print(f\"{master_key}, {next_key}\")\n \n '''\n \n try:\n master_df = master_df.join(\n next_df,\n master_df[master_df_key] == next_df[next_df_key],\n join_type,\n )\n except Exception as exc:\n print(exc)\n \n # DEBUG\n # print(f'No. of records after join : {master_df.count()}')\n \n return master_df", "def ConcatDF(train_set, test_set):\n df_all = pd.concat([train_set, test_set], sort=True).reset_index(drop=True)\n df_all.trn_len = train_set.shape[0]\n return df_all", "def get_new_modelling_data():\n # get latest epidemic data from OWID \n\n df = pd.read_json(requests.get(\"https://covid.ourworldindata.org/data/owid-covid-data.json\").content)\n data = pd.DataFrame(df[\"POL\"][\"data\"])\n\n # get latest government restriction data from Oxford tracker\n response = requests.get(\"https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv\").content\n rest = pd.read_csv(io.StringIO(response.decode('utf-8')))\n rest = rest[rest.CountryName == \"Poland\"]\n\n modelling = pd.DataFrame(Mobility.objects.values())\n prepare_model_data(data,rest,modelling)", "def combine_record(self, dt, container = ''):\n \n record_dataset_legth ={} \n other_ds = []\n\n ''' I fill the dic e.g. record_dataset_legth{100:['era5_1','ncar'], 80:['bufr','igra2'] }\n i.e. the keys are the lengths, the entries are the lists of datasets '''\n\n duplicates = []\n\n for k in container.keys(): # loop over the dataset\n if k not in other_ds:\n other_ds.append(k)\n for f in container[k]: # loop over the file per dataset\n num_rec = len(container[k][f]['obs_tab'][\"date_time\"])\n \n \"\"\" Storing all the reports id with the proper prefix (for each different dataset) \"\"\"\n rep_id = b''.join(container[k][f][\"obs_tab\"]['report_id'][0]) \n rep_id = self.observation_ids_merged[k] + rep_id \n duplicates.append( rep_id ) \n \n if num_rec not in record_dataset_legth.keys():\n record_dataset_legth[num_rec] = {}\n record_dataset_legth[num_rec]['best_ds'] = []\n record_dataset_legth[num_rec]['file'] = []\n\n record_dataset_legth[num_rec]['best_ds'].append(k)\n record_dataset_legth[num_rec]['file'].append(f)\n\n max_entries = max(record_dataset_legth.keys())\n \n ''' best_ds is the list of longest datasets, best_datasets the list of all the datasets available including best_ds '''\n best_datasets = record_dataset_legth[max_entries]\n\n \"\"\" Choosing the priority of the datasets:\n - if era5_1 or era5_2 are present, pick them (they cant be both present for the same date_time)\n - else, if igra2 is present, pick it\n - else, one of the remaining ones \"\"\"\n\n if 'era5_2' in best_datasets and 'era5_1' not in best_datasets: # era5_1 and era5_2 should never be both present anyway...\n best_ds = 'era5_2' \n elif 'era5_1' in best_datasets and 'era5_2' not in best_datasets:\n best_ds = 'era5_1'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' in best_datasets:\n best_ds = 'igra2'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' not in best_datasets:\n best_ds = 
record_dataset_legth[max_entries]['best_ds'][0] # pick the first of the list \n\n best_file = record_dataset_legth[max_entries]['file'][0]\n\n ''' If more file are available for the same best_ds, pick the first one from the list '''\n selected_obstab, selected_era5fb = container[best_ds][best_file]['obs_tab'] , container[best_ds][best_file]['era5fb_tab']\n\n ''' Creating the correct observations and record ids. \n All the bytes variable are shrunk to a long |S1 byte variable type, otherwise \n writing in h5py will not work. '''\n \n for var in ['observation_id']:\n if type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.bytes_:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var] ] )\n elif type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.ndarray:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var][:] ] )\n\n for var in ['report_id']:\n val = selected_obstab[var][0]\n if type (selected_obstab[var] ) == np.ndarray and type (val) == np.bytes_:\n value = self.observation_ids_merged[best_ds] + b''.join(val) # it is the same for each row in the table\n elif type (selected_obstab[var] ) == np.ndarray and type (val) == np.ndarray:\n value = self.observation_ids_merged[best_ds] + b''.join(val) \n arr = np.full( (1, len( selected_obstab['date_time']) ) , value )[0] # np.full returns a list of lists\n\n selected_obstab[var] = arr\n\n\n for var in selected_era5fb.keys():\n if type (selected_era5fb[var]) == np.ndarray and type (selected_era5fb[var][0] ) == np.ndarray:\n try:\n selected_era5fb[var] = np.array( [b''.join(l) for l in selected_era5fb[var][:] ] )\n #print('MANAGED FFF', var)\n except:\n value = [b''.join(l) for l in selected_era5fb[var][0] ][0]\n #print('VALUE IS FFF', value)\n selected_era5fb[var] = np.array( (1, len( selected_obstab[var]) ) ).fill(value)\n\n \"\"\" Extracting the header \"\"\"\n selected_head = self.get_header_table(dt, ds = best_ds, File = best_file )\n for var in selected_head.keys():\n if type (selected_head[var] ) == np.ndarray and type (selected_head[var][0] ) == np.bytes_:\n selected_head[var] = np.array( [b''.join(l) for l in selected_head[var][:] ] )\n\n if 'best_ds' == 'era5_1' or best_ds == 'era5_2' :\n selected_obstab['advanced_assimilation_feedback'] = np.array([1]*len(selected_obstab['date_time']) )\n else:\n selected_obstab['advanced_assimilation_feedback'] = np.array([0]*len(selected_obstab['date_time']) )\n\n #best_ds_byte = np.bytes_(best_ds, ndtype = '|S10') # converting to bytes object\n best_ds_byte = np.bytes_(best_ds) # converting to bytes object \n arr = np.full( (1, len( selected_obstab['date_time']) ) , best_ds_byte )[0]\n selected_obstab['source_id'] = arr\n\n duplicate = b','.join(duplicates)\n #selected_head['duplicates'] = np.array(duplicate)\n\n duplicate = np.array(duplicate).astype(dtype='|S70')\n selected_head['duplicates'] = np.array([duplicate])\n selected_head['report_id'] = np.array([selected_obstab['report_id'][0]])\n selected_head['source_id'] = np.array([selected_obstab['source_id'][0]])\n selected_head['record_timestamp'] = np.array([selected_obstab['date_time'][0]])\n\n selected_file = np.bytes_(best_file.split('/')[-1])\n \n return best_ds, selected_obstab, selected_era5fb, selected_head, selected_file, best_file", "def attach_data(df: pd.DataFrame) -> pd.DataFrame:\n # load in parties and constituents data\n # if data is missing ask 
if scraping is wanted to be performed\n\n with open(os.path.join(instancepath, f\"parties.json\"), \"r\", encoding='utf-8') as json_file:\n parties = json.load(json_file)\n with open(os.path.join(instancepath, f\"constituencies.json\"), \"r\", encoding='utf-8') as json_file:\n constituencies = json.load(json_file)\n \n\n \"\"\" #tätä glob hommaa en saanu toimiin, käy for loopin sisäl vaan yhen kerran ja hakee vaan yhden tiedoston\n # load the scraped data to its own data frame\n df_scraped = pd.DataFrame(columns=['first_name', 'last_name', 'election_number', 'image', 'election_promise_1', 'party', 'constituency'])\n i = 1\n with suppress(KeyError,FileNotFoundError):\n for filename in glob(f\"{instancepath}/candidate*.json\"):\n print(\"jee\")\n with open(filename, \"r\", encoding='utf-8') as json_file:\n candidate = json.load(json_file)\n party_name = None\n constituency_name = None\n for part in parties:\n if part['id'] == candidate[\"party_id\"]:\n party_name = part['name_fi']\n\n for consti in constituencies:\n if consti['id'] == candidate[\"constituency_id\"]:\n constituency_name = consti['name_fi']\n\n df_scraped = df_scraped.append({'first_name': candidate['first_name'], \n 'last_name': candidate['last_name'], \n 'election_number': candidate['election_number'], \n 'image': candidate['image'], \n 'election_promise_1': candidate['info']['election_promise_1'],\n 'party': party_name,\n 'constituency': constituency_name}, \n ignore_index = True)\n #except (FileNotFoundError, KeyError):\n \"\"\"\n\n # load the scraped data to its own data frame\n df_scraped = pd.DataFrame(columns=['first_name', 'last_name', 'election_number', 'image', 'election_promise_1', 'party', 'constituency'])\n i = 1\n while i <= 3000: # files are named arfter candidate ids and ids range is between 1 and 3000\n with suppress(KeyError,FileNotFoundError):\n with open(os.path.join(instancepath, f\"candidate\" + str(i) + \".json\"), \"r\", encoding='utf-8') as json_file:\n candidate = json.load(json_file)\n party_name = None\n constituency_name = None\n for part in parties:\n if part['id'] == candidate[\"party_id\"]:\n party_name = part['name_fi']\n\n for consti in constituencies:\n if consti['id'] == candidate[\"constituency_id\"]:\n constituency_name = consti['name_fi']\n\n df_scraped = df_scraped.append({\n 'first_name': candidate['first_name'].strip(), \n 'last_name': candidate['last_name'].strip(), \n 'election_number': candidate['election_number'], \n 'image': candidate['image'], \n 'election_promise_1': candidate['info']['election_promise_1'],\n 'party': party_name.strip(),\n 'constituency': constituency_name}, \n ignore_index = True)\n i += 1\n\n\n # loading in data of each individual constituent\n # the ids range from 1 to 14\n j = 1\n constituentArray = []\n constituentArray.append(\"\")\n while j <= 14:\n try:\n with open(os.path.join(instancepath, f'constituent' + str(j) + '.json'), \"r\", encoding='utf-8') as json_file:\n constituentArray.append(json.load(json_file))\n except FileNotFoundError:\n constituentArray.append(\"\")\n j += 1\n\n for i, row in df.iterrows():\n promise = row['Mitkä ovat kolme vaalilupaustasi? 
Vaalilupaus 1:']\n constituency = row['constituency']\n party = row['party']\n df = search_andsetvalues(promise, constituency, party, df, i, df_scraped)\n return df", "def build_dataset(self):\n\n print('begin to build data set...')\n\n # build dictionary\n for e in self.lst_entity:\n self.dictionary_entity[e] = len(self.dictionary_entity)\n\n for r in self.lst_relation:\n self.dictionary_relation[r] = len(self.dictionary_relation)\n\n self.reverse_dictionary_entity = dict(zip(self.dictionary_entity.values(), self.dictionary_entity.keys()))\n self.reverse_dictionary_relation = dict(zip(self.dictionary_relation.values(), self.dictionary_relation.keys()))\n\n # build data map\n self.lst_entity_map = [self.dictionary_entity[e] for e in self.lst_entity]\n self.lst_relation_map = [self.dictionary_relation[r] for r in self.lst_relation]\n self.lst_triplet_train_map = self.map_triplet(self.lst_triplet_train)\n\n # build corrupted candidates for (h,r,~) and (~,r,t)\n for (h, r, t) in self.lst_triplet_train_map:\n if r not in self.dict_tofh:\n self.dict_tofh[r] = {h: [t]}\n else:\n if h not in self.dict_tofh[r]:\n self.dict_tofh[r][h] = [t]\n else:\n self.dict_tofh[r][h].append(t)\n\n if r not in self.dict_hoft:\n self.dict_hoft[r] = {t: [h]}\n else:\n if t not in self.dict_hoft[r]:\n self.dict_hoft[r][t] = [h]\n else:\n self.dict_hoft[r][t].append(h)\n\n for r in self.dict_tofh:\n self.lst_triplet_corrupted_tail[r] = dict()\n for h in self.dict_tofh[r]:\n set_tail_corrupted_all = set(self.lst_entity_map) - set(self.dict_tofh[r][h])\n lst_tail_corrupted_choose = random.sample(set_tail_corrupted_all, 5*len(self.dict_tofh[r][h]))\n self.lst_triplet_corrupted_tail[r][h] = lst_tail_corrupted_choose\n\n for r in self.dict_hoft:\n self.lst_triplet_corrupted_head[r] = dict()\n for t in self.dict_hoft[r]:\n lst_head_corrupted_all = set(self.lst_entity_map) - set(self.dict_hoft[r][t])\n lst_head_corrupted_choose = random.sample(lst_head_corrupted_all, 5*len(self.dict_hoft[r][t]))\n self.lst_triplet_corrupted_head[r][t] = lst_head_corrupted_choose\n\n print('data set has been built successfully!')", "def load_data():\n d = load_wine()\n data = {colname: d.data[:, i] for i, colname in enumerate(d.feature_names)}\n data[\"target\"] = d.target\n return pd.DataFrame(data)", "def create_data_model():\r\n data = {}\r\n data['period'] = int(sheet1.cell_value(1, getColumnIndex(sheet1,'调度周期')))\r\n counttype_technician=3\r\n data['technician']=[]\r\n for i in range(1,1+counttype_technician):\r\n data['technician'].append(int(sheet1.cell_value(i, getColumnIndex(sheet1,'技工日工资'))))\r\n data['base'] = {}\r\n count_base=1 # 码头个数\r\n data['base']['coordinate']=[]\r\n for i in range(1,1+count_base):\r\n base_x=sheet1.cell_value(i, getColumnIndex(sheet1,'码头坐标X'))\r\n base_y=sheet1.cell_value(i, getColumnIndex(sheet1,'码头坐标Y'))\r\n data['base']['coordinate'].append((base_x,base_y))\r\n\r\n data['base']['technician']=[]\r\n for b in range(0,count_base):\r\n data['base']['technician'].append([])\r\n for j in range(counttype_technician):\r\n data['base']['technician'][b].append([])\r\n for i in range(data['period']):\r\n data['base']['technician'][b][j].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'%d类技工总人数'% (j+1)))))\r\n\r\n data['wind_farm'] = {}\r\n count_wind_farm=2 #需要维修的风电场个数\r\n count_wind_turbine=[8,8] #每个风电场需要维修的风机个数\r\n count_wind_turbine_sum=[36,36]# 每个风电场所有的风机个数\r\n data['wind_farm']['maintenance_time']=[]\r\n count_wturbine=[] #用于计数,记录不同风电场风机信息在Excel位置\r\n count_wturbine_l=0\r\n for i in 
range(count_wind_farm):\r\n count_wturbine.append(count_wturbine_l)\r\n count_wturbine_l=count_wturbine_l+count_wind_turbine[i]\r\n count_turbine=[]\r\n count_turbine_l=0\r\n for i in range(count_wind_farm):\r\n count_turbine.append(count_turbine_l)\r\n count_turbine_l=count_turbine_l+count_wind_turbine_sum[i]\r\n\r\n ###设定与风电场相关的参数\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['maintenance_time'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['maintenance_time'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'风机维护时间'))))\r\n\r\n data['wind_farm']['technician']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['technician'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['technician'][i].append([])\r\n for k in range(counttype_technician):\r\n data['wind_farm']['technician'][i][j].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'%d类技工需求量'% (k+1)))))\r\n\r\n\r\n data['wind_farm']['parts_weight']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['parts_weight'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['parts_weight'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'风机所需备件重量'))))\r\n\r\n data['wind_farm']['present']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['present'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['present'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'风机在维修时是否需要船停泊'))))\r\n\r\n data['wind_farm']['deadline']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['deadline'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['deadline'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'最晚建议维修时间'))))\r\n\r\n data['wind_farm']['penalty_cost']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['penalty_cost'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['penalty_cost'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'逾时惩罚成本'))))\r\n\r\n data['vessel'] = {}\r\n counttype_vessel=3\r\n data['vessel']['capacity']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['capacity'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的备件容量'))))\r\n\r\n data['vessel']['technician']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['technician'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的人员可载量'))))\r\n\r\n data['vessel']['cost']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['cost'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的油费'))))\r\n\r\n data['vessel']['speed']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['speed'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的航速'))))\r\n\r\n data['vessel']['trans_time']=[] # 这里默认转移时间跟船的类型没有关系,与时期有关\r\n for i in range(data['period']):\r\n data['vessel']['trans_time'].append(sheet1.cell_value(i+1, getColumnIndex(sheet1,'技工转移时间')))\r\n\r\n data['vessel']['time_window']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['time_window'].append([])\r\n for j in range(data['period']):\r\n data['vessel']['time_window'][i].append([])\r\n for k in range(count_wind_farm):\r\n data['vessel']['time_window'][i][j].append(int(sheet1.cell_value(j+1, getColumnIndex(sheet1,'风电场%d船%d可作业时间'%(k+1,i+1)))))\r\n\r\n # # 风机坐标\r\n # 
data['wind_farm']['coordinate']=[]\r\n # for i in range(count_wind_farm):\r\n # data['wind_farm']['coordinate'].append([])\r\n # for j in range(72):\r\n # turbine_x = sheet1.cell_value(j+1, getColumnIndex(sheet1, '风机坐标X'))\r\n # turbine_y = sheet1.cell_value(j+1, getColumnIndex(sheet1, '风机坐标Y'))\r\n # data['wind_farm']['coordinate'][i].append((turbine_x, turbine_y))\r\n\r\n # 风机坐标\r\n data['wind_farm']['coordinate']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['coordinate'].append([])\r\n for j in range(count_wind_turbine_sum[i]):\r\n turbine_x = sheet1.cell_value(j+1+count_turbine[i], getColumnIndex(sheet1, '风机坐标X'))\r\n turbine_y = sheet1.cell_value(j+1+count_turbine[i], getColumnIndex(sheet1, '风机坐标Y'))\r\n data['wind_farm']['coordinate'][i].append((turbine_x, turbine_y))\r\n\r\n\r\n data['wind_farm']['task']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['task'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['task'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'需要维修风机编号'))))\r\n\r\n return data", "def prep_dataset(settings):\n train_dims = settings[\"train_dims\"]\n # Open HDF store. This is usually a soft link to our filtered dataset\n input_df, target_df, const = load_from_store(settings[\"dataset_path\"], columns=train_dims)\n\n try:\n del input_df[\"nions\"] # Delete leftover artifact from dataset split\n except KeyError:\n pass\n\n target_df = drop_outliers(target_df, settings)\n target_df = drop_nans(target_df)\n\n data_df = filter_input(input_df, target_df)\n del target_df, input_df\n data_df = convert_dtype(data_df, settings)\n\n return data_df", "def prep_func(data_dic):\n\n df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())\n\n # combine desired datasets into one dataframe\n for label in dataset_labels:\n df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)\n\n df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names\n\n # dropping unused columns/features\n for col in ['Time', 'trial', 'maneuver']:\n if col in df_all.columns:\n df_all = df_all.drop(columns=[col])\n\n columns_all = df_all.columns.tolist()\n columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data\n\n # all torque features except for roc (mean/std/... & left/right/sum/diff)\n columns_2d_torque = [col for col in df_all.columns.tolist()\n if 'Torque_sum' in col or 'Torque_diff' in col and 'roc' not in col]\n\n # all torque features of left and right only (mean/std/... 
& left/right)\n columns_lr_torque = [col for col in df_all.columns.tolist()\n if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]\n\n columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only\n columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only\n\n # dictionary of list of feature subsets to be used for dimension_reduction or clustering\n featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,\n '2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,\n 'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}\n\n # Standardize features by removing the mean and scaling to unit variance\n scaler = StandardScaler()\n feat_all_stand = scaler.fit_transform(df_all.values)\n df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset\n\n return df_all_stand, df_all_columns, featureSet_dic", "def get_combined_data(self, file_path: str, train_file_name: str,\n test_file_name: str) -> pd.DataFrame:\n train_data=self.load_dataset(file_path,train_file_name)\n train_data=train_data.drop('Survived', 1)\n test_data=self.load_dataset(file_path,test_file_name)\n\n combined_data = train_data.append(test_data)\n combined_data.reset_index(inplace=True)\n combined_data.drop('index', inplace=True, axis=1)\n\n return combined_data", "def make_datasources_table(self, ds: Datastore) -> pd.DataFrame:\n datasets_settings = self.get_datasets()\n # grab all of the datasets that show up by name in the datastore\n datasets_in_datastore_format = {\n name: setting\n for (name, setting) in datasets_settings.items()\n if name in ds.get_known_datasets() and setting is not None\n }\n # add the eia datasets that are nested inside of the eia settings\n if datasets_settings.get(\"eia\", False):\n datasets_in_datastore_format.update(\n {\n \"eia860\": datasets_settings[\"eia\"].eia860,\n \"eia861\": datasets_settings[\"eia\"].eia861,\n \"eia923\": datasets_settings[\"eia\"].eia923,\n }\n )\n\n datasets = datasets_in_datastore_format.keys()\n df = pd.DataFrame(\n data={\n \"datasource\": datasets,\n \"partitions\": [\n json.dumps(datasets_in_datastore_format[dataset].partitions)\n for dataset in datasets\n ],\n \"doi\": [\n _make_doi_clickable(ds.get_datapackage_descriptor(dataset).doi)\n for dataset in datasets\n ],\n }\n )\n # add in EIA860m if eia in general is in the settings and the 860m bool is True\n special_nested_datasets = pd.DataFrame()\n if (\n datasets_settings.get(\"eia\", False)\n and datasets_settings[\"eia\"].eia860.eia860m\n ):\n special_nested_datasets = pd.DataFrame(\n data={\n \"datasource\": [\"eia860m\"],\n \"partitions\": [\n json.dumps(\n datasets_in_datastore_format[\n \"eia860\"\n ].eia860m_data_source.working_partitions\n )\n ],\n \"doi\": [\n _make_doi_clickable(\n ds.get_datapackage_descriptor(\"eia860m\").doi\n )\n ],\n }\n )\n df = pd.concat([df, special_nested_datasets]).reset_index(drop=True)\n df[\"pudl_version\"] = pudl.__version__\n return df", "def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)", "def 
test_merge_datasets(self):\n disk.merge_datasets(self.input_datasets[0:2], self.output_dataset)\n self.assertEqual(4, len(self.output_dataset.metadata()))", "def get_train_csv(self):\n try:\n self.train_article = pd.read_csv(constants.DATA_DIR / 'knn_article_tags.csv')\n except FileNotFoundError:\n train = pd.Series([])\n for csv_file in os.listdir(constants.CLEAN_DIR):\n if csv_file in self.article_feat_csvs:\n df = pd.read_csv(constants.CLEAN_DIR / csv_file)\n feat = csv_file[8:-4]\n g = df.dropna(axis=0).groupby(\"id\")[self.tag_ref[feat]]\n if train.empty:\n train = g.apply(lambda x: list(x.astype(str).str.lower()))\n else:\n g = g.apply(lambda x: list(x.astype(str).str.lower()))\n train = train.combine(g, lambda x1, x2: list(set(x1+x2)), fill_value=[])\n\n train = pd.DataFrame({'id':train.index, 'tags':train.values})\n train.to_csv(constants.DATA_DIR / 'knn_article_tags.csv', header=True)\n self.train_article = train\n\n try:\n self.train_image = pd.read_csv(constants.DATA_DIR / 'knn_image_tags.csv')\n except FileNotFoundError:\n train = pd.Series([])\n for csv_file in os.listdir(constants.CLEAN_DIR):\n if csv_file in self.image_feat_csvs:\n df = pd.read_csv(constants.CLEAN_DIR / csv_file)\n feat = csv_file[6:-4]\n g = df.dropna(axis=0).groupby(\"id\")[self.tag_ref[feat]]\n if train.empty:\n train = g.apply(lambda x: list(x.astype(str).str.lower()))\n else:\n g = g.apply(lambda x: list(x.astype(str).str.lower()))\n train = train.combine(g, lambda x1, x2: list(set(x1+x2)), fill_value=[])\n\n train = pd.DataFrame({'id':train.index, 'tags':train.values})\n train.to_csv(constants.DATA_DIR / 'knn_image_tags.csv', header=True)\n self.train_image = train", "def compile_data():\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n metasp = pd.DataFrame()\r\n for count, ticker in enumerate(tickers):\r\n df = pd.read_csv('sp500_data\\{}.csv'.format(ticker))\r\n df.set_index('Date', inplace=True)\r\n df.rename(columns={'Adj Close': ticker}, inplace=True)\r\n df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], 1, inplace=True)\r\n if metasp.empty:\r\n metasp = df\r\n else:\r\n metasp = metasp.join(df, how = 'outer')\r\n if count % 10 == 0:\r\n print(count)\r\n metasp.to_csv('sp500_meta.csv')", "def tech_specific_to_dataset(model_run):\n data_dict = collections.defaultdict(lambda: {\"dims\": [\"techs\"], \"data\": []})\n\n systemwide_constraints = set(\n [\n k.split(\".\")[-1]\n for k in model_run.techs.keys_nested()\n if \".constraints.\" in k and k.endswith(\"_systemwide\")\n ]\n )\n\n for tech in model_run.sets[\"techs\"]:\n if tech in model_run.sets[\"techs_transmission\"]:\n tech = tech.split(\":\")[0]\n data_dict[\"colors\"][\"data\"].append(\n model_run.techs[tech].get_key(\"essentials.color\")\n )\n data_dict[\"inheritance\"][\"data\"].append(\n \".\".join(model_run.techs[tech].get_key(\"inheritance\"))\n )\n data_dict[\"names\"][\"data\"].append(\n # Default to tech ID if no name is set\n model_run.techs[tech].get_key(\"essentials.name\", tech)\n )\n for k in systemwide_constraints:\n data_dict[k][\"data\"].append(\n model_run.techs[tech].constraints.get_key(k, np.nan)\n )\n\n return data_dict", "def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, 
all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the 
header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0", "def merge_additional_features(df):\n col = [\"hour\",\"day\" ,\"dayofweek\", \"month\" , \"interval\" , \"season\", \"time_of_day\"]\n additional_featues = pd.DataFrame(data = [features_from_timestamp(i) for i in df.index ],columns=col).set_index(df.index)\n data = df.merge(additional_featues,on=\"dt\")\n data.sort_index(inplace=True) #make sure data is sorted by date\n\n return data" ]
[ "0.59869367", "0.57781816", "0.5758608", "0.563408", "0.56181586", "0.55629724", "0.5561206", "0.5549236", "0.5505305", "0.55047166", "0.54905367", "0.54782295", "0.54737127", "0.5469189", "0.544329", "0.54359186", "0.54143375", "0.5409169", "0.5407379", "0.53848493", "0.5383751", "0.537155", "0.53513575", "0.5328619", "0.53272426", "0.53148323", "0.5314481", "0.5307224", "0.5300109", "0.5293462", "0.528227", "0.52821195", "0.52752364", "0.5274859", "0.5274021", "0.5272545", "0.52476585", "0.52402174", "0.5239844", "0.52397233", "0.523887", "0.52321184", "0.52163583", "0.5209267", "0.5207735", "0.5200038", "0.51911104", "0.5182598", "0.5175809", "0.51547885", "0.51320076", "0.5126824", "0.51264983", "0.51145905", "0.5112535", "0.5111287", "0.51108164", "0.5107959", "0.510703", "0.5102748", "0.5101997", "0.5101146", "0.50914747", "0.5085896", "0.5082043", "0.5081602", "0.50780046", "0.507572", "0.5074242", "0.5071947", "0.5061297", "0.50612575", "0.5053104", "0.5051316", "0.5051034", "0.5043877", "0.50424814", "0.5039054", "0.5035488", "0.50338286", "0.5031947", "0.50277495", "0.5025689", "0.50220585", "0.5018929", "0.5014955", "0.50139886", "0.50136125", "0.5010588", "0.50058544", "0.5004375", "0.4991295", "0.49891528", "0.49844638", "0.49834916", "0.49826515", "0.49806607", "0.4976854", "0.49763048", "0.4970876" ]
0.69934803
0
Split the joined data into a dict with a df for each meter type
def split_on_meter_type(joined_data, meter_types):
    joined_data_dict = {meter_type: joined_data[joined_data['meter_type'] == meter_type]
                        for meter_type in meter_types}
    return joined_data_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"') \"\n \"limit 200000 \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=query)\n # logger.debug (\"%s\",r)\n payload = r.json()\n # logger.debug(\"Payload:%s\", payload)\n\n if apt_no in ['102A', 102]:\n apt_no = 102\n meters = retrieve_meter_info(apt_no)\n logger.debug(\"Meters: %s\", meters)\n\n streams = []\n meter_type = []\n l_meters = range(0, len(meters))\n for i in l_meters:\n uuid = payload[i]['uuid']\n\n # Get meter type based on uuid\n for meter in meters:\n if meter['uuid'] == uuid:\n m_type = meter['type']\n # logger.debug (uuid, m_type)\n\n meter_type.append(m_type)\n streams.append(np.array(payload[i]['Readings']))\n # logger.debug(\"Streams: %s\", streams)\n\n if len(streams[0]) > 0:\n\n df = [pd.DataFrame({'time': readings[:, 0] / 1000, 'power': readings[:, 1],\n 'type': [meter_type[i]] * len(readings)},\n columns=['time', 'power', 'type']) for i, readings in enumerate(streams)]\n else:\n df = []\n\n return df", "def generate_record(self, data_dictionaries, group_by):\n result = {}\n\n for one_measurement in data_dictionaries:\n time = one_measurement['datetime']\n\n if isinstance(time, str):\n if self.timezone:\n time = arrow.get(time).shift(hours=6) # TODO: fix utc conversion\n else:\n time = arrow.get(time)\n\n record = Record(self.name, self.lat, self.lon, self.height, time)\n\n del one_measurement['datetime']\n\n one_measurement = {k: float(v) for k, v in one_measurement.items()}\n\n record.merge(one_measurement)\n\n key = group_by(time)\n \n if key == '2016-04-01_00':\n break\n\n record_string = record.little_r_report()\n\n try:\n result[key].append(record_string)\n except KeyError:\n result[key] = [record_string]\n\n return result", "def extract_data(self):\n values = {}\n for injkey in self.data_sets.keys():\n values[injkey] = {}\n alldata = self.data_sets[injkey]\n paramkeys = alldata['params'].keys()\n for datakey in alldata.keys():\n if not datakey == 'params':\n values[injkey][datakey] = {}\n values[injkey][datakey]['metric_val'] = {}\n values[injkey][datakey]['metric_val']['vals'] = []\n for paramkey in paramkeys:\n values[injkey][datakey][paramkey] = {}\n values[injkey][datakey][paramkey]['vals'] = []\n trials = alldata[datakey]\n for trial_num in trials.keys():\n trial = trials[trial_num]\n values[injkey][datakey]['metric_val']['vals'] \\\n .append(trial['metric_val'])\n values[injkey][datakey]['metric_val']['type'] \\\n = trial['metric']\n values[injkey][datakey]['metric_val']['units'] \\\n = 'dimensionless'\n param_vals = trial['params']\n for param_name in param_vals.keys():\n val, units = self.parse_pint_string(\n pint_string=param_vals[param_name]\n )\n values[injkey][datakey][param_name]['vals'] \\\n .append(float(val))\n values[injkey][datakey][param_name]['units'] \\\n = units\n self.values = values", "def prepare_data(groups):\n all_dicts = []\n for idx, group in groups:\n res_dict = {'organism': group.organism.iloc[0]}\n for g_idx, row in group.iterrows():\n if pd.notna(row.label):\n res_dict[row.cmp_name] = {'label': row.label, 'mic': row.MIC}\n else:\n res_dict[row.cmp_name] = {'label': '', 'mic': row.MIC}\n all_dicts.append(res_dict)\n return 
all_dicts", "def _make_meta(self):\n available_meas_times = list()\n available_intervals = list()\n drill_by = list()\n related = list()\n last_data_set_instance = dict()\n\n if self._data['report_save_historical_instances_ind'] == 'Y':\n # last measurement instance\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if res:\n last_data_set_instance = self._db.record[0]\n last_data_set_instance['measurement_time'] = self._formatter.format_date(last_data_set_instance['measurement_time'])\n\n # available measurement instances\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\"\"\",(self._id, self._segment_value_id))\n if res:\n for data_set_instance in self._db.record:\n data_set_instance['measurement_time'] = self._formatter.format_date(data_set_instance['measurement_time'])\n available_meas_times.append(data_set_instance)\n \n\n # get drill by. not for this version\n\n # available measurement intervals\n if self._data['report_primary_shared_dimension_id'] is None:\n self._data['report_primary_shared_dimension_id'] = 0\n\n self._db.Query(\"\"\"\n SELECT measurement_interval.*,\n dashboard_element.element_id\n FROM dashboard_element\n LEFT JOIN measurement_interval\n ON measurement_interval.measurement_interval_id = dashboard_element.measurement_interval_id\n WHERE\n (dashboard_element.`element_id`<>%s\n AND dashboard_element.measurement_interval_id <> %s\n AND dashboard_element.shared_measure_id = %s\n AND dashboard_element.`type` = 'internal report'\n AND ifnull(dashboard_element.report_used_for_drill_to_ind,'N') = %s\n AND ifnull(dashboard_element.report_primary_shared_dimension_id,0) = %s\n AND ifnull(dashboard_element.segment_id,0) = %s)\n OR\n dashboard_element.`element_id`=%s\n AND 3=4\n \n GROUP BY measurement_interval.measurement_interval_id\n ORDER BY\n measurement_interval.display_sequence,\n dashboard_element.name ASC\n \"\"\",\n (self._id,\n self._data['measurement_interval_id'],\n self._data['shared_measure_id'],\n self._data['report_used_for_drill_to_ind'],\n self._data['report_primary_shared_dimension_id'],\n self._data['segment_id'],\n self._id))\n\n\n for interval in self._db.record:\n interval['report_data_set_instance_id'] = 0\n available_intervals.append(interval)\n\n # see related\n self._db.Query(\"\"\"SELECT e.*\n FROM dashboard_element_topic det, dashboard_element e\n WHERE e.element_id = det.dashboard_element_id\n AND dashboard_element_id <> %s\n AND e.enabled_ind = 'Y'\n AND topic_id IN (select topic_id from dashboard_element_topic where dashboard_element_id = %s)\n UNION SELECT e.*\n FROM dashboard_element e, metric_drill_to_report m\n WHERE m.metric_element_id = e.element_id\n AND m.report_element_id = %s\n AND e.enabled_ind = 'Y'\n AND ifnull(e.segment_id,0) = %s\n \"\"\", (self._id, self._id, self._id, self._data['segment_id']))\n \n\n for related_element in self._db.record:\n if not related_element['segment_id']:\n related_element['segment_id'] = 0\n if related_element['segment_id'] == self._data['segment_id']:\n related_element['segment_value_id'] = self._segment_value_id\n else:\n related_element['segment_value_id'] = 0\n related.append(related_element)\n\n # elements displayed on the page\n before_dataset = list()\n after_dataset = list()\n \n charts_before_dataset = list()\n 
charts_after_dataset = list()\n \n \n # dataset table\n dataset_el = OrderedDict()\n dataset_el['element_id'] = ''\n dataset_el['element_type'] = 'dataset'\n dataset_el['element_name'] = ''\n dataset_el['element_desc'] = ''\n dataset_el['placement'] = ''\n dataset_el['sequence'] = 0\n dataset_el['show_ind'] = self._data['show_data_set_table_in_report_ind']\n \n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND \n (ISNULL(report_data_set_pivot_id)\n OR report_data_set_pivot_id = 0) \n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n charts_before_dataset.append(chart_el)\n else:\n charts_after_dataset.append(chart_el)\n \n # pivots\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_pivot\n WHERE\n `element_id`= %s\n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for pivot in self._db.record:\n before_pivot = list()\n after_pivot = list()\n #pivot_element = list()\n \n pivot_el = OrderedDict()\n pivot_el['element_id'] = pivot['report_data_set_pivot_id']\n pivot_el['element_type'] = 'pivot'\n pivot_el['element_name'] = pivot['name']\n pivot_el['element_desc'] = ''\n pivot_el['placement'] = pivot['pivot_table_report_placement']\n pivot_el['sequence'] = pivot['display_sequence']\n pivot_el['show_ind'] = pivot['enabled_ind']\n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND report_data_set_pivot_id = %s \n ORDER BY display_sequence ASC\"\"\",\n (self._id, pivot_el['element_id']))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n before_pivot.append(chart_el)\n else:\n after_pivot.append(chart_el)\n pivot_element = before_pivot + [pivot_el] + after_pivot \n \n if pivot_el['placement'] == 'before data set':\n before_dataset += pivot_element\n else:\n after_dataset += pivot_element\n elements = charts_before_dataset + before_dataset + [dataset_el] + after_dataset + charts_after_dataset\n \n \n self._jfile.make_current_meta(last_data_set_instance,\n available_meas_times,\n available_intervals,\n drill_by,\n related,\n elements,\n self._segment_values)", "def split(df, group):\n\n data = namedtuple(\"data\", [\"filename\", \"object\"]) #initiate \"data\" tyoe\n gb = df.groupby(group) #group df by group attribute\n return [\n data(filename, gb.get_group(x))\n for filename, x in zip(gb.groups.keys(), gb.groups)\n ]", "def group_data():\n\n # Merge on Departure.\n\n # Merge on Arrival.\n\n data = 
pd.read_csv(path + \"/data/public/public_train.csv\")[[\"DateOfDeparture\", \"Arrival\"]]\n data['DateOfDeparture'] = pd.to_datetime(data['DateOfDeparture'])\n\n arrival = join_cleaned_data().\\\n rename(columns={'Date': 'DateOfDeparture', 'Airport': 'Arrival'}).\\\n set_index(\"DateOfDeparture\")\n\n merged_arrv = pd.merge(data, arrival, on=[\"DateOfDeparture\", \"Arrival\"], how=\"left\")\n\n # Rename and drop columns.\n\n merged_arrv.columns = [c + \"_Arrival\" if c not in [\"DateOfDeparture\",\n \"DateOfArrival\",\n \"Arrival\",\n \"WeeksToDeparture\"]\n else c\n for c in merged_arrv.columns]\n print merged_arrv\n merged_arrv = merged_arrv.drop([\"Arrival\"], axis=1)\n\n # Concatenate the two fields.\n # merged_all = pd.concat([merged_arrv, merged_dept], axis=1)\n\n merged_all = merged_arrv.\\\n convert_objects(convert_numeric=True)\n merged_all.to_csv(path + \"/Submission/temperatures.csv\")", "def collect_data():\n\n \"Aqui va el codigo de alberto para recoger los datos que puede venir en forma de diccionario\"\n #TODO: Función para recoger los datos de los bms y meterlos en diccionarios (Alberto jr.)\n\n bms1 = dict()\n bms2 = dict()\n bms3 = dict()\n general = dict()\n\n\n # Ejemplos de datos para meter en los diccionarios\n\n temperature = 35.5\n voltage1 = 15.2\n voltage2 = 14.8\n date = time.strftime(\"%Y-%m-%d\") # Current date\n t = time.strftime(\"%H:%M:%S\") # Current time\n\n return bms1, bms2, bms3, general", "def process_data(data, engine):\n\n def convert(x):\n unit = x['standard_units']\n value = x['standard_value']\n if unit == \"NM\":\n return value * 1e-9\n elif unit == \"-LOG(10) M\":\n return 10 ** (-value)\n else:\n raise RuntimeError\n\n # Filter Na\n data.dropna(how=\"any\", subset=USED_COLS, inplace=True)\n # Only keep measurements that are KD related\n data = data[data.standard_type.isin(KD_TYPES)]\n # Only keep measurements with some defined units\n data = data[data.standard_units.isin(KD_UNITS)]\n # Convert to M valued units\n data['standard_value'] = data.apply(convert, axis=1)\n # Keep only equal relation measurements\n data = data[data.standard_relation == '=']\n # Remove multiple targets measurements\n data = data[~data.target_id.str.contains(',')]\n # Remove (target,compound) pairs with more than one measurement\n key = ['standard_inchi_key', 'target_id']\n grouped = data.groupby(key).size()\n join_condition = grouped[grouped == 1].reset_index()[key]\n data = data.merge(join_condition, on=key, how='inner')\n # Remove outliers measurements\n data = data[(data.standard_value <= 1.7e-3) & (data.standard_value >= 1.e-10)]\n # Convert to PK values\n data['standard_value'] = - np.log10(data['standard_value'])\n # Remove samples for which the protein_id has no sequence\n sequence_loader = SequenceLoader(engine=engine)\n data = sequence_loader.transform(data).dropna(how=\"any\")\n # Remove samples for which the compound_id has no inchi\n inchi_loader = InchiLoader(engine=engine)\n data = inchi_loader.transform(data).dropna(how='any')\n # We will only use the following columns\n return data[[\"standard_inchi_key\", \"target_id\", \"standard_inchi\", \"sequence\", \"standard_value\"]]", "def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, 
encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict", "def join_survey_data(survey, deezer):\n\n\n df = survey.rename(columns={'Age': 'user_age', 'Gender': 'user_gender',\n 'deezer_id': 'media_id'})\n\n for index, row in df.iterrows():\n if pd.isnull(row['time']):\n continue\n time = row['time'].split(',')\n if row['user_gender'] == 'Male':\n user_gender = 1\n else:\n user_gender = 0\n if time == None:\n if row['rating'] == 0:\n for i in [1480513129, 1479067262, 1478675619]:\n new = pd.DataFrame(np.array([[999999, i, row['media_id'],\n 999999, 0, 20001010, 1, 0,\n 999, 1, user_gender,\n row['user_id'], None,\n row['user_age'], 0]]),\n columns=['genre_id', 'ts_listen',\n 'media_id', 'album_id',\n 'context_type',\n 'release_date',\n 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id',\n 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n elif 'Anytime' in time:\n for i in [1480513129, 1479067262, 1478675619]:\n new = pd.DataFrame(np.array([[999999, i, row['media_id'],\n 999999, 0, 20001010, 1, 0, 999,\n 1, user_gender,\n row['user_id'], None,\n row['user_age'], 0]]),\n columns=['genre_id', 'ts_listen',\n 'media_id', 'album_id',\n 'context_type',\n 'release_date', 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id', 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n else:\n t_dict = {'Morning': 0, 'Afternoon': 0, 'Evening': 0}\n for t in time:\n t_dict[t] = 1\n for i in [('Morning', 1480513129), ('Afternoon', 1479067262),\n ('Evening', 1478675619)]:\n new = pd.DataFrame(np.array([[999999, i[1], row['media_id'],\n 999999, 0, 20001010, 1, 0, 999,\n 1, user_gender,\n row['user_id'], None,\n row['user_age'], t_dict[i[0]]]]),\n columns=['genre_id',\n 'ts_listen',\n 'media_id',\n 'album_id',\n 'context_type',\n 'release_date', 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id', 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n\n return deezer", "def splitByBand(reader):\r\n try:\r\n total_prod_cost = 0\r\n total_album_sales = 0\r\n len = 0\r\n\r\n for row in reader:\r\n theDict = d[row[bandCol]] # step1: gets the dictionary for the band\r\n\r\n al_sale = theDict[AlbumSales] # step2 gets the list of album sales for the band\r\n al_sale.append(float(row[album_sales_col])) #step3 appends current row value to the list\r\n theDict[AlbumSales] = al_sale #step4 updates the value to 'AlbumSales' key\r\n\r\n pc = theDict[ProdCost] #step5 gets the list of production cost for the band\r\n pc.append(float(row[prod_cost_col])) #step6 appends the current row value to the list\r\n theDict[ProdCost] = pc #step7 updates the value to 'ProdCost'\r\n d[row[bandCol]] = theDict #step8 updates value for d with updated theDict\r\n except Exception as e:\r\n print('Exception in splitByBand')\r\n raise e", "def extract_data():\n raw_data = 
pd.read_csv(\"../../../resource/DataVisualization/vaccinations.csv\")\n raw_data = raw_data[[\"location\", \"date\", \"people_fully_vaccinated_per_hundred\"]]\n raw_data.date = pd.to_datetime(raw_data.date, format=\"%Y-%m-%d\")\n min_date = raw_data.date.min()\n raw_data.date = raw_data.date-min_date\n raw_data.date = pd.Series([x.days for x in raw_data.date])\n raw_data.drop(raw_data.loc[raw_data.people_fully_vaccinated_per_hundred.isnull()].index,\n axis=0, inplace=True)\n raw_data[\"people_fully_vaccinated_per_hundred\"] /= 100\n\n data_dict = dict()\n for country in raw_data.location.unique():\n if len(raw_data.loc[raw_data.location == country]) >= 100:\n tmp_data = raw_data.loc[raw_data.location == country]\n tmp_data.drop(\"location\", axis=1, inplace=True)\n data_dict[country] = {\"data\":tmp_data}\n else:\n raw_data.drop(raw_data.loc[raw_data.location ==\n country].index, inplace=True)\n return data_dict, min_date, raw_data", "def train(self, metergroup):\n # Inizialise stats and training data:\n self.stats = []\n self.onpower_train = pd.DataFrame(columns=['onpower'])\n self.offpower_train = pd.DataFrame(columns=['offpower'])\n self.duration_train = pd.DataFrame(columns=['duration'])\n\n # Calling train_on_chunk by meter:\n instance = 1 # initial instance.\n for meter in metergroup.meters:\n for chunk in meter.power_series():\n if chunk.empty:\n print(\"Chunk empty\")\n else:\n print(\"Training on chunk\")\n if self.sampling_method is not None:\n how = lambda df: getattr(df, self.sampling_method)()\n else:\n how = lambda df: df.mean()\n \n self.train_on_chunk(how(pd.DataFrame(chunk.resample(\n self.sample_period))),\n meter\n )\n\n instance += 1", "def splitter (data1, data2):\n flow_data = list()\n fare_record_data = list()\n\n for line in data1:\n line = [line[2:6],line[6:10],line[10:15],line[15:18],line[18],line[19],line[36:39],line[20:28],line[28:36],line[42:49]]\n flow_data.append(line)\n\n flow = pd.DataFrame(flow_data, columns=[\"ORIGIN_CODE\",\"DESTINATION_CODE\",\"ROUTE_CODE\",\"STATUS_CODE\",\"USAGE_CODE\",\"DIRECTION\",\"TOC\",\"VALID_UNTIL\",\"VALID_FROM\",\"FLOW_ID\"])\n flow['ROUTE_CODE'] = flow['ROUTE_CODE'].astype(object)\n flow.index.name=\"flow_idx\"\n\n for line in data2:\n line=[line[2:9],line[9:12],line[12:20]]\n fare_record_data.append(line)\n\n fare_record = pd.DataFrame(fare_record_data, columns=[\"FLOW_ID\",\"TICKET_CODE\",\"FARE\"])\n fare_record.index.name = \"fare_record_idx\"\n\n return flow,fare_record", "def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. 
group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def data_parser(df):\n\n chunks = []\n for row in df.itertuples():\n piece = {}\n piece['datetime'] = row[1]\n piece[row[3]] = row[4]\n chunks.append(piece)\n\n # Join dicts on shared 'datetime' keys.\n combine = defaultdict(dict)\n for elem in chunks:\n combine[elem['datetime']].update(elem)\n\n ordered = sorted(combine.values(), key=itemgetter(\"datetime\"))\n\n mapped_generation = []\n for item in ordered:\n mapped_types = [(mapping.get(k, k), v) for k, v in item.items()]\n\n # Need to avoid multiple 'unknown' keys overwriting.\n complete_production = defaultdict(lambda: 0.0)\n for key, val in mapped_types:\n try:\n complete_production[key] += val\n except TypeError:\n # Datetime is a string at this point!\n complete_production[key] = val\n\n dt = complete_production.pop('datetime')\n final = (dt, dict(complete_production))\n mapped_generation.append(final)\n\n return mapped_generation", "def _get_data(self) -> dict:\n LOGGER.debug(f\"Setting data property for {self.dirname}\")\n data = {}\n for axis in range(1, 4):\n # Subsample by 8 since this does not vary quickly\n data[f\"aoatter{axis}\"] = (\n self.tlm[f\"aoatter{axis}\"].vals[::ATT_ERR_SUBSAMP].astype(np.float32)\n )\n data[\"aokalstr\"] = self.tlm[\"aokalstr\"].vals\n # fmt: off\n data[\"npnt_kalm\"] = (\n (self.tlm[\"aopcadmd\"].vals == \"NPNT\")\n & (self.tlm[\"aoacaseq\"].vals == \"KALM\")\n )\n # fmt: on\n for slot in range(8):\n data[f\"aca_track{slot}\"] = self.tlm[f\"aoacfct{slot}\"].vals == \"TRAK\"\n data[f\"aca_ir{slot}\"] = self.tlm[f\"aoaciir{slot}\"].vals == \"ERR\"\n data[\"times\"] = self.tlm[\"aokalstr\"].times\n data[\"perigee_times\"] = self.tlm.perigee_times.astype(np.float32)\n data[\"perigee\"] = self.perigee.date\n data[\"rad_entry\"] = self.rad_entry.date\n data[\"rad_exit\"] = self.rad_exit.date\n data[\"obss\"] = self.obss.as_array()\n\n return data", "def load_meters_from_buliding(self, target_building, meters_name=[], sample_rate = '1min'):\n if self.df is None:\n self.read_data_from_csv()\n \n if len(meters_name) < 1 :\n meters_name = self.meter_name.keys()\n\n if 'main' in meters_name:\n meters_name.remove('main')\n \n building_meters = self.df.groupby('buildingid').get_group(target_building)\n building_meters.index = pd.to_datetime(building_meters['reporttime'], format='%Y-%m-%d %H:%M:%S')\n building_meters = building_meters.groupby('channelid')\n building_channels = building_meters.groups.keys()\n \n if self.meter_name['main'][0] not in building_channels: return\n buliding_df = building_meters.get_group(self.meter_name['main'][0]).rename(columns={\"w\": \"main\"})\n buliding_df = buliding_df.resample(sample_rate, how='mean')\n target_meters = ['main']\n\n for meter, channel_ids in self.meter_name.iteritems():\n if meter in meters_name and channel_ids[0] in building_channels:\n appliance_meter = building_meters.get_group(channel_ids[0]).rename(columns={\"w\": meter})\n \n for channel_id in channel_ids[1:]:\n if channel_id not in building_channels: continue\n another_channel = building_meters.get_group(channel_id).rename(columns={\"w\": meter})\n appliance_meter.append(another_channel)\n\n appliance_meter = appliance_meter.resample(sample_rate, how='mean')\n buliding_df = pd.merge(buliding_df, appliance_meter, right_index=True, left_index=True, how='left')\n target_meters.append(meter)\n \n 
buliding_df = buliding_df[target_meters]\n buliding_df = buliding_df[~buliding_df.index.duplicated()]\n if buliding_df is not None:\n self.buliding_df.setdefault(target_building, buliding_df)\n \n return buliding_df", "def stats_data_by_type():\n photos = db_session.query(MediaFiles.size).filter(MediaFiles.duration == 0)\n videos = db_session.query(MediaFiles.size).filter(MediaFiles.duration > 0)\n result = [{'name': 'Photos', 'color': '#76BCEB',\n 'data': [photos.with_entities(func.sum(MediaFiles.size)).all()[0][0],\n photos.count()]},\n {'name': 'Videos', 'color': '#397DAA',\n 'data': [videos.with_entities(func.sum(MediaFiles.size)).all()[0][0],\n videos.count()]}]\n return result", "def get_datalist_fr_json(self):\n raw_data = json.load(open(self.saved_json_file, 'r'))\n for indivdual_set in raw_data['query']['results']['stats']:\n temp_dict_data = {}\n if type(indivdual_set) == str:\n #for single data\n continue # temp do not use\n for parameters in indivdual_set.keys():\n if type(indivdual_set[parameters]) == str:\n temp_dict_data[parameters] = indivdual_set[parameters]#for symbol\n elif type(indivdual_set[parameters]) == dict:\n if indivdual_set[parameters].has_key('content'):\n temp_dict_data[parameters] = indivdual_set[parameters]['content']\n\n ## append to list\n self.com_data_allstock_list.append(temp_dict_data)", "def structure_by_package(mel):\n \"\"\"receives in a pandas dataframe\"\"\"\n string='K10024-'\n WP='00'\n l={}\n mel['Level 1','Level 2','Level 3','Level 4']=''\n mel['WP']=mel['Level'].str.replace('.','',regex=True) \n for i,row in mel.iterrows():\n print (WP)\n if (type(row['WP Activity/ Part No.']) is str) and (string in row['WP Activity/ Part No.']) :\n #new section starts:\n WP=row['WP Activity/ Part No.']\n l[row['Level']]=row['Equipment Description']\n \n mel.loc[i,'WP']=WP\n for key in l.keys():\n mel.loc[i,'Level ' +key]=l[key]\n \n mel.dropna(subset=['Delivery','WP'], inplace=True)\n \n mel['WP']=mel['WP'].str.replace('K10024-','',regex=False) \n mel['WP']=mel['WP'].str[:2]\n mel.drop(columns=['Level'],inplace=True) \n mel.to_excel('packages_MEL02.xlsx')\n return mel", "def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results", "def group_data_by_gs(data_table):\n gene_data = collections.defaultdict(lambda: collections.defaultdict(list))\n for _idx, row in data_table.iterrows():\n samp = row['sample']\n gene = row['gene']\n gene_data[gene][samp].append({\n 'muttype': row['type'].strip(),\n 'normalized': row['Normalized'], # NMAF in the manuscript\n 'consequence': row['MissenseConsequence'].strip(),\n })\n return gene_data", "def _composition_handler(self):\n return {\n group : StockAnalyzer(data) \\\n for group, data in self.data.groupby(self.group_by)\n }", "def get_all_data(ds_names, ds_types, indxs, fields, **kwargs):\n data = {f:{} for f in fields+['time']}\n\n for ds_type, keys in ds_types.items():\n for dsk in keys:\n print('Getting data for: ',dsk)\n\n dsf = ds_names[dsk]\n\n if ds_type == 'maven':\n ds = pd.read_csv(dsf)\n for field in fields:\n\n ds_dat = get_ds_data(ds, field, indxs[ds_type],\n maven=True, grid=False)\n data[field][dsk] = ds_dat\n time = get_ds_data(ds, 'time', indxs[ds_type],\n maven=True, 
grid=False)\n time = time-time[0]\n time = time/time[-1]\n data['time'][dsk] = time\n \n\n\n else:\n for field in fields:\n with h5py.File(dsf, 'r') as ds:\n \n if '_x' in field or '_y' in field or '_z' in field:\n get_data_func = get_rotated_data\n else: get_data_func = get_ds_data\n try:\n ds_dat = get_data_func(ds, field, indxs[ds_type],\n grid='batsrus' not in ds_type, **kwargs)\n #grid=ds_type=='heliosares', **kwargs)\n except ValueError:\n ds_dat = np.array([])\n data[field][dsk] = ds_dat\n\n data['time'][dsk] = np.linspace(0, 1, np.max(indxs[ds_type].shape))\n\n return data", "def combine_record(self, dt, container = ''):\n \n record_dataset_legth ={} \n other_ds = []\n\n ''' I fill the dic e.g. record_dataset_legth{100:['era5_1','ncar'], 80:['bufr','igra2'] }\n i.e. the keys are the lengths, the entries are the lists of datasets '''\n\n duplicates = []\n\n for k in container.keys(): # loop over the dataset\n if k not in other_ds:\n other_ds.append(k)\n for f in container[k]: # loop over the file per dataset\n num_rec = len(container[k][f]['obs_tab'][\"date_time\"])\n \n \"\"\" Storing all the reports id with the proper prefix (for each different dataset) \"\"\"\n rep_id = b''.join(container[k][f][\"obs_tab\"]['report_id'][0]) \n rep_id = self.observation_ids_merged[k] + rep_id \n duplicates.append( rep_id ) \n \n if num_rec not in record_dataset_legth.keys():\n record_dataset_legth[num_rec] = {}\n record_dataset_legth[num_rec]['best_ds'] = []\n record_dataset_legth[num_rec]['file'] = []\n\n record_dataset_legth[num_rec]['best_ds'].append(k)\n record_dataset_legth[num_rec]['file'].append(f)\n\n max_entries = max(record_dataset_legth.keys())\n \n ''' best_ds is the list of longest datasets, best_datasets the list of all the datasets available including best_ds '''\n best_datasets = record_dataset_legth[max_entries]\n\n \"\"\" Choosing the priority of the datasets:\n - if era5_1 or era5_2 are present, pick them (they cant be both present for the same date_time)\n - else, if igra2 is present, pick it\n - else, one of the remaining ones \"\"\"\n\n if 'era5_2' in best_datasets and 'era5_1' not in best_datasets: # era5_1 and era5_2 should never be both present anyway...\n best_ds = 'era5_2' \n elif 'era5_1' in best_datasets and 'era5_2' not in best_datasets:\n best_ds = 'era5_1'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' in best_datasets:\n best_ds = 'igra2'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' not in best_datasets:\n best_ds = record_dataset_legth[max_entries]['best_ds'][0] # pick the first of the list \n\n best_file = record_dataset_legth[max_entries]['file'][0]\n\n ''' If more file are available for the same best_ds, pick the first one from the list '''\n selected_obstab, selected_era5fb = container[best_ds][best_file]['obs_tab'] , container[best_ds][best_file]['era5fb_tab']\n\n ''' Creating the correct observations and record ids. \n All the bytes variable are shrunk to a long |S1 byte variable type, otherwise \n writing in h5py will not work. 
'''\n \n for var in ['observation_id']:\n if type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.bytes_:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var] ] )\n elif type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.ndarray:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var][:] ] )\n\n for var in ['report_id']:\n val = selected_obstab[var][0]\n if type (selected_obstab[var] ) == np.ndarray and type (val) == np.bytes_:\n value = self.observation_ids_merged[best_ds] + b''.join(val) # it is the same for each row in the table\n elif type (selected_obstab[var] ) == np.ndarray and type (val) == np.ndarray:\n value = self.observation_ids_merged[best_ds] + b''.join(val) \n arr = np.full( (1, len( selected_obstab['date_time']) ) , value )[0] # np.full returns a list of lists\n\n selected_obstab[var] = arr\n\n\n for var in selected_era5fb.keys():\n if type (selected_era5fb[var]) == np.ndarray and type (selected_era5fb[var][0] ) == np.ndarray:\n try:\n selected_era5fb[var] = np.array( [b''.join(l) for l in selected_era5fb[var][:] ] )\n #print('MANAGED FFF', var)\n except:\n value = [b''.join(l) for l in selected_era5fb[var][0] ][0]\n #print('VALUE IS FFF', value)\n selected_era5fb[var] = np.array( (1, len( selected_obstab[var]) ) ).fill(value)\n\n \"\"\" Extracting the header \"\"\"\n selected_head = self.get_header_table(dt, ds = best_ds, File = best_file )\n for var in selected_head.keys():\n if type (selected_head[var] ) == np.ndarray and type (selected_head[var][0] ) == np.bytes_:\n selected_head[var] = np.array( [b''.join(l) for l in selected_head[var][:] ] )\n\n if 'best_ds' == 'era5_1' or best_ds == 'era5_2' :\n selected_obstab['advanced_assimilation_feedback'] = np.array([1]*len(selected_obstab['date_time']) )\n else:\n selected_obstab['advanced_assimilation_feedback'] = np.array([0]*len(selected_obstab['date_time']) )\n\n #best_ds_byte = np.bytes_(best_ds, ndtype = '|S10') # converting to bytes object\n best_ds_byte = np.bytes_(best_ds) # converting to bytes object \n arr = np.full( (1, len( selected_obstab['date_time']) ) , best_ds_byte )[0]\n selected_obstab['source_id'] = arr\n\n duplicate = b','.join(duplicates)\n #selected_head['duplicates'] = np.array(duplicate)\n\n duplicate = np.array(duplicate).astype(dtype='|S70')\n selected_head['duplicates'] = np.array([duplicate])\n selected_head['report_id'] = np.array([selected_obstab['report_id'][0]])\n selected_head['source_id'] = np.array([selected_obstab['source_id'][0]])\n selected_head['record_timestamp'] = np.array([selected_obstab['date_time'][0]])\n\n selected_file = np.bytes_(best_file.split('/')[-1])\n \n return best_ds, selected_obstab, selected_era5fb, selected_head, selected_file, best_file", "def carrier_specific_to_dataset(model_run):\n carrier_tiers = model_run.sets[\"carrier_tiers\"]\n loc_tech_dict = {k: [] for k in model_run.sets[\"loc_techs_conversion_plus\"]}\n data_dict = dict()\n # Set information per carrier tier ('out', 'out_2', 'in', etc.)\n # for conversion-plus technologies\n if model_run.sets[\"loc_techs_conversion_plus\"]:\n # carrier ratios are the floating point numbers used to compare one\n # carrier_in/_out value with another carrier_in/_out value\n data_dict[\"carrier_ratios\"] = dict(\n dims=[\"carrier_tiers\", \"loc_tech_carriers_conversion_plus\"], data=[]\n )\n for carrier_tier in carrier_tiers:\n data = 
[]\n for loc_tech_carrier in model_run.sets[\"loc_tech_carriers_conversion_plus\"]:\n loc, tech, carrier = loc_tech_carrier.split(\"::\")\n carrier_ratio = (\n model_run.locations[loc]\n .techs[tech]\n .constraints.get_key(\n \"carrier_ratios.carrier_\" + carrier_tier + \".\" + carrier, 1\n )\n )\n data.append(carrier_ratio)\n loc_tech_dict[loc + \"::\" + tech].append(carrier_ratio)\n data_dict[\"carrier_ratios\"][\"data\"].append(data)\n\n # Additional system-wide constraints from model_run.model\n if model_run.model.get(\"reserve_margin\", {}) != {}:\n data_dict[\"reserve_margin\"] = {\n \"data\": [\n model_run.model.reserve_margin.get(c, np.nan)\n for c in model_run.sets[\"carriers\"]\n ],\n \"dims\": \"carriers\",\n }\n\n return data_dict", "def __object_demapper(self, data: list) -> pd.DataFrame:\n data = pd.DataFrame.from_records([s.to_dict() for s in data])\n\n return data", "def combine_data(spectras, compounds) :\n final = {}\n for hmdb_id, spec_objs in spectras.items() :\n c = compounds.pop(hmdb_id, None)\n if not c :\n continue\n c.spectras = spec_objs\n final[hmdb_id] = c\n return final", "def format_odometer(raw: list) -> dict:\n instruments: dict = {}\n for instrument in raw:\n instruments[instrument[\"type\"]] = instrument[\"value\"]\n if \"unit\" in instrument:\n instruments[instrument[\"type\"] + \"_unit\"] = instrument[\"unit\"]\n\n return instruments", "def process_data_group(folder:Path, type:str, light:bool = False) -> dict:\n\n if type == dm.Delivery:\n data_folder = folder / 'data'\n else:\n data_folder = folder\n\n # check for non-existent or empty folder\n if not data_folder.exists():\n raise FileNotFoundError\n try:\n next((data_folder).glob(\"**/*\"))\n except StopIteration:\n # folder is empty can't process it\n raise FileNotFoundError\n\n # Get file sizes, last modified dates, and names to count,\n # sum size, and hash the file data provided\n file_sizes, file_modified_dates, file_metamodified_dates, file_names = zip(\n *[\n (f.stat().st_size, f.stat().st_mtime, f.stat().st_ctime, f)\n for f in (data_folder).glob(\"**/*\")\n if f.is_file() and f.name != 'receipt.rst'\n ]\n )\n\n last_modified = datetime.fromtimestamp(\n max(max(file_modified_dates),\n max(file_metamodified_dates)))\n\n # Hash the files in the delivery\n if light:\n folder_hash = 'skipped'\n else:\n folder_hash = hash_files(file_names)\n\n dg = {\n 'name' : folder.name,\n 'type' : type.__name__,\n 'last_update' : datetime.now(),\n 'size' : sum(file_sizes),\n 'num_files' : len(file_sizes),\n 'group_hash' : folder_hash,\n 'group_last_modified' : last_modified,\n }\n\n return dg", "def create_data_model():\r\n data = {}\r\n data['period'] = int(sheet1.cell_value(1, getColumnIndex(sheet1,'调度周期')))\r\n counttype_technician=3\r\n data['technician']=[]\r\n for i in range(1,1+counttype_technician):\r\n data['technician'].append(int(sheet1.cell_value(i, getColumnIndex(sheet1,'技工日工资'))))\r\n data['base'] = {}\r\n count_base=1 # 码头个数\r\n data['base']['coordinate']=[]\r\n for i in range(1,1+count_base):\r\n base_x=sheet1.cell_value(i, getColumnIndex(sheet1,'码头坐标X'))\r\n base_y=sheet1.cell_value(i, getColumnIndex(sheet1,'码头坐标Y'))\r\n data['base']['coordinate'].append((base_x,base_y))\r\n\r\n data['base']['technician']=[]\r\n for b in range(0,count_base):\r\n data['base']['technician'].append([])\r\n for j in range(counttype_technician):\r\n data['base']['technician'][b].append([])\r\n for i in range(data['period']):\r\n data['base']['technician'][b][j].append(int(sheet1.cell_value(i+1, 
getColumnIndex(sheet1,'%d类技工总人数'% (j+1)))))\r\n\r\n data['wind_farm'] = {}\r\n count_wind_farm=2 #需要维修的风电场个数\r\n count_wind_turbine=[8,8] #每个风电场需要维修的风机个数\r\n count_wind_turbine_sum=[36,36]# 每个风电场所有的风机个数\r\n data['wind_farm']['maintenance_time']=[]\r\n count_wturbine=[] #用于计数,记录不同风电场风机信息在Excel位置\r\n count_wturbine_l=0\r\n for i in range(count_wind_farm):\r\n count_wturbine.append(count_wturbine_l)\r\n count_wturbine_l=count_wturbine_l+count_wind_turbine[i]\r\n count_turbine=[]\r\n count_turbine_l=0\r\n for i in range(count_wind_farm):\r\n count_turbine.append(count_turbine_l)\r\n count_turbine_l=count_turbine_l+count_wind_turbine_sum[i]\r\n\r\n ###设定与风电场相关的参数\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['maintenance_time'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['maintenance_time'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'风机维护时间'))))\r\n\r\n data['wind_farm']['technician']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['technician'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['technician'][i].append([])\r\n for k in range(counttype_technician):\r\n data['wind_farm']['technician'][i][j].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'%d类技工需求量'% (k+1)))))\r\n\r\n\r\n data['wind_farm']['parts_weight']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['parts_weight'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['parts_weight'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'风机所需备件重量'))))\r\n\r\n data['wind_farm']['present']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['present'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['present'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'风机在维修时是否需要船停泊'))))\r\n\r\n data['wind_farm']['deadline']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['deadline'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['deadline'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'最晚建议维修时间'))))\r\n\r\n data['wind_farm']['penalty_cost']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['penalty_cost'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['penalty_cost'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'逾时惩罚成本'))))\r\n\r\n data['vessel'] = {}\r\n counttype_vessel=3\r\n data['vessel']['capacity']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['capacity'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的备件容量'))))\r\n\r\n data['vessel']['technician']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['technician'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的人员可载量'))))\r\n\r\n data['vessel']['cost']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['cost'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的油费'))))\r\n\r\n data['vessel']['speed']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['speed'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的航速'))))\r\n\r\n data['vessel']['trans_time']=[] # 这里默认转移时间跟船的类型没有关系,与时期有关\r\n for i in range(data['period']):\r\n data['vessel']['trans_time'].append(sheet1.cell_value(i+1, getColumnIndex(sheet1,'技工转移时间')))\r\n\r\n data['vessel']['time_window']=[]\r\n for i in range(counttype_vessel):\r\n 
data['vessel']['time_window'].append([])\r\n for j in range(data['period']):\r\n data['vessel']['time_window'][i].append([])\r\n for k in range(count_wind_farm):\r\n data['vessel']['time_window'][i][j].append(int(sheet1.cell_value(j+1, getColumnIndex(sheet1,'风电场%d船%d可作业时间'%(k+1,i+1)))))\r\n\r\n # # 风机坐标\r\n # data['wind_farm']['coordinate']=[]\r\n # for i in range(count_wind_farm):\r\n # data['wind_farm']['coordinate'].append([])\r\n # for j in range(72):\r\n # turbine_x = sheet1.cell_value(j+1, getColumnIndex(sheet1, '风机坐标X'))\r\n # turbine_y = sheet1.cell_value(j+1, getColumnIndex(sheet1, '风机坐标Y'))\r\n # data['wind_farm']['coordinate'][i].append((turbine_x, turbine_y))\r\n\r\n # 风机坐标\r\n data['wind_farm']['coordinate']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['coordinate'].append([])\r\n for j in range(count_wind_turbine_sum[i]):\r\n turbine_x = sheet1.cell_value(j+1+count_turbine[i], getColumnIndex(sheet1, '风机坐标X'))\r\n turbine_y = sheet1.cell_value(j+1+count_turbine[i], getColumnIndex(sheet1, '风机坐标Y'))\r\n data['wind_farm']['coordinate'][i].append((turbine_x, turbine_y))\r\n\r\n\r\n data['wind_farm']['task']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['task'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['task'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'需要维修风机编号'))))\r\n\r\n return data", "def format_odometer(raw) -> dict:\r\n instruments: dict = {}\r\n for instrument in raw:\r\n instruments[instrument[\"type\"]] = instrument[\"value\"]\r\n if \"unit\" in instrument:\r\n instruments[instrument[\"type\"] + \"_unit\"] = instrument[\"unit\"]\r\n\r\n return instruments", "def map_segmentation_to_dataframe( segmentation_type, segmentation_image ):\n mydf_fn = get_data( segmentation_type )\n mydf = pd.read_csv( mydf_fn )\n mylgo = ants.label_geometry_measures( segmentation_image )\n return pd.merge( mydf, mylgo, how='left', on=[\"Label\"] )", "def df_2_dict(df,band_list,lens_model_list,source_model_list,lens_light_model_list):\n \n import re\n from lenstronomy.Util.param_util import ellipticity2phi_q\n from lenstronomy.Util.param_util import shear_cartesian2polar\n \n import pandas as pd\n \n\n model_kwarg_names = get_kwarg_names(lens_model_list,source_model_list,\n lens_light_model_list,None)\n \n IDs = df.loc[:,'ID']\n chi_sq = df.loc[:,'reduced chi^2']\n \n lens_dict = {}\n \n for i,prof in enumerate(lens_model_list):\n lens_dict[prof] = {}\n for param in model_kwarg_names['kwargs_lens'][i]:\n col = df.loc[:,'{}_lens.{}'.format(prof,param)]\n col_array = col.values\n lens_dict[prof][param] = col_array\n \n if 'e1' in model_kwarg_names['kwargs_lens'][i]:\n lens_dict[prof]['q'] = np.array([])\n lens_dict[prof]['phi'] = np.array([]) \n for j in range(len(lens_dict[prof]['e1'])):\n phi,q = ellipticity2phi_q(lens_dict[prof]['e1'][j],lens_dict[prof]['e2'][j])\n lens_dict[prof]['q'] = np.append(lens_dict[prof]['q'],q)\n lens_dict[prof]['phi'] = np.append(lens_dict[prof]['phi'],phi)\n elif 'gamma1' in model_kwarg_names['kwargs_lens'][i]:\n lens_dict[prof]['gamma'] = np.array([])\n lens_dict[prof]['theta'] = np.array([])\n for j in range(len(lens_dict[prof]['gamma1'])):\n theta,gamma = shear_cartesian2polar(lens_dict[prof]['gamma1'][j],lens_dict[prof]['gamma2'][j])\n lens_dict[prof]['gamma'] = np.append(lens_dict[prof]['gamma'],gamma)\n lens_dict[prof]['theta'] = np.append(lens_dict[prof]['theta'],theta)\n \n \n source_dict = {}\n lens_light_dict = {}\n \n for i,band in enumerate(band_list):\n 
for j,prof in enumerate(source_model_list):\n key = '{} Band: {}'.format(band,prof)\n source_dict[key] = {}\n for param in model_kwarg_names['kwargs_source'][j]:\n col = df.loc[:,'{} Band: {}_source.{}'.format(band,prof,param)]\n col_array = col.values\n source_dict[key][param] = col_array\n \n if 'e1' in model_kwarg_names['kwargs_source'][j]:\n source_dict[key]['q'] = np.array([])\n source_dict[key]['phi'] = np.array([]) \n for k in range(len(source_dict[key]['e1'])):\n phi,q = ellipticity2phi_q(source_dict[key]['e1'][k],source_dict[key]['e2'][k])\n source_dict[key]['q'] = np.append(source_dict[key]['q'],q)\n source_dict[key]['phi'] = np.append(source_dict[key]['phi'],phi)\n \n for j,prof in enumerate(lens_light_model_list):\n key = '{} Band: {}'.format(band,prof)\n lens_light_dict[key] = {}\n for param in model_kwarg_names['kwargs_lens_light'][j]:\n col = df.loc[:,'{} Band: {}_lens_light.{}'.format(band,prof,param)]\n col_array = col.values\n lens_light_dict[key][param] = col_array\n\n if 'e1' in model_kwarg_names['kwargs_lens_light'][j]:\n lens_light_dict[key]['q'] = np.array([])\n lens_light_dict[key]['phi'] = np.array([]) \n for k in range(len(lens_light_dict[key]['e1'])):\n phi,q = ellipticity2phi_q(lens_light_dict[key]['e1'][k],lens_light_dict[key]['e2'][k])\n lens_light_dict[key]['q'] = np.append(lens_light_dict[key]['q'],q)\n lens_light_dict[key]['phi'] = np.append(lens_light_dict[key]['phi'],phi)\n \n params_dict = {'Object IDs': IDs.values,'Reduced Chi^2': chi_sq.values,\n 'lens': lens_dict, 'source': source_dict, 'lens_light': lens_light_dict}\n \n return params_dict", "def get_data_subsets(t0, t1):\n\n # Iridium data:\n irid = iridium[(iridium.time >= t0) & (iridium.time <= t1)]\n irid_B = np.vstack((irid.B_e.values, irid.B_n.values, irid.B_r.values))\n irid_coords = np.vstack((irid.lon.values, irid.lat.values, irid.r.values))\n\n # SuperMAG data:\n smag = supermag.loc[t0:t1, :]\n smag_B = np.vstack((smag.Be.values, smag.Bn.values, smag.Bu.values))\n smag_coords = np.vstack((smag.lon.values, smag.lat.values))\n\n # SuperDARN data:\n sd = superdarn.loc[t0:t1, :]\n vlos = sd['vlos'].values\n sd_coords = np.vstack((sd['glon'].values, sd['glat'].values))\n los = np.vstack((sd['le'].values, sd['ln'].values))\n\n\n # Make the data objects. The scale keyword determines a weight for the dataset. 
Increase it to reduce weight\n iridium_data = lompe.Data(irid_B * 1e-9, irid_coords, datatype = 'space_mag_fac', scale = 200e-9)\n supermag_data = lompe.Data(smag_B * 1e-9, smag_coords, datatype = 'ground_mag' , scale = 100e-9)\n superdarn_data = lompe.Data(vlos , sd_coords , LOS = los, datatype = 'convection' , scale = 500 )\n\n return(iridium_data, supermag_data, superdarn_data)", "def main(self):\n\n assault_mech_df = self.get_mech_df(url=self.assault_url)\n heavy_mech_df = self.get_mech_df(url=self.heavy_url)\n medium_mech_df = self.get_mech_df(url=self.medium_url)\n light_mech_df = self.get_mech_df(url=self.light_url)\n all_weights_df = pd.concat([assault_mech_df, heavy_mech_df, medium_mech_df, \n light_mech_df])\n\n self.save_data(assault_mech_df, \"assault\")\n self.save_data(heavy_mech_df, \"heavy\")\n self.save_data(medium_mech_df, \"medium\")\n self.save_data(light_mech_df, \"light\")\n self.save_data(all_weights_df, \"all_weights\")\n #get maximum new columns needed for splitting variants\n max_cols = all_weights_df.variants.apply(lambda x: len(x)).max()\n melt_cols = []\n\n for i in range(max_cols):\n all_weights_df[\"var_\"+str(i)] = \"\"\n melt_cols.append(\"var_\"+str(i))\n\n variant_weights_df = pd.DataFrame()\n for index, row in all_weights_df.iterrows():\n for i in range(len(row[\"variants\"])):\n #add each variant to variant weights as a row with mech, tonnage, variant\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n for i in range(len(row[\"hero_chassis\"])):\n new_row_dict = {\n \"mech_name\":row[\"hero_names\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"hero_chassis\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n\n\n for i in range(len(row[\"special_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"special_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df]) \n\n #add champion variants by matching on \n for i in range(len(row[\"champion_variants\"])):\n new_row_dict = {\n \"mech_name\":row[\"mechs\"],\n \"tonnage\":row[\"tonnage\"],\n \"variant\":row[\"champion_variants\"][i].upper()\n }\n new_row_df = pd.DataFrame(new_row_dict, index=[0])\n variant_weights_df = pd.concat([variant_weights_df, new_row_df])\n #remove duplicate rows \n variant_weights_df = variant_weights_df[variant_weights_df.duplicated(keep=\"first\")==False]\n self.save_data(variant_weights_df, \"variant_weights\")", "def join_input_data_and_multi_index(data, dataset_name):\n\n meter_df = data[dataset_name]\n building_df = data['building_metadata']\n weather_df = data['weather_' + dataset_name]\n\n # join meter and weather data\n building_n_meter = meter_df.merge(building_df, on='building_id', how='left')\n joined_data = building_n_meter.merge(weather_df, on=['site_id', 'timestamp'], how='left')\n\n # Add time related columns\n joined_data['hour'] = joined_data['timestamp'].dt.hour\n joined_data['weekday'] = joined_data['timestamp'].dt.dayofweek\n joined_data['week_number'] = joined_data['timestamp'].dt.week\n joined_data['month'] = joined_data['timestamp'].dt.month\n\n joined_data['is_weekend'] = joined_data['weekday'].apply(lambda x: 1 if 
x in [0, 6] else 0)\n\n # multi index on building id and timestamp\n joined_data = joined_data.set_index(['building_id', 'timestamp']).sort_index()\n\n return joined_data", "def data_partition(num_workers, data_set, separate=True):\n\n size = data_set.data.shape[0]\n ind = list(range(size))\n\n if separate:\n shuffle(ind)\n # worker_size is the number of samples per worker. The last worker however receives the additional samples\n worker_size = size // num_workers\n data = dict.fromkeys(list(range(num_workers)))\n\n for w in range(num_workers):\n if w is not num_workers - 1:\n data[w] = ind[w * worker_size: (w+1) * worker_size]\n # data[w][\"X\"] = X_train[ind[w * worker_size: (w + 1) * worker_size], :]\n # data[w][\"Y\"] = Y_train[ind[w * worker_size: (w + 1) * worker_size], :]\n else:\n data[w] = ind[w * worker_size:]\n # data[w][\"X\"] = X_train[ind[w * worker_size:], :]\n # data[w][\"Y\"] = Y_train[ind[w * worker_size:], :]\n\n else:\n data = dict.fromkeys(list(range(num_workers)))\n for w in range(num_workers):\n shuffle(ind)\n data[w] = ind\n # data[w][\"X\"] = X_train[ind, :]\n # data[w][\"Y\"] = Y_train[ind, :]\n\n return data", "def _items_divide(self, numerator_data, denominator_data):\n items = {}\n if numerator_data['items'] is None:\n items = None\n else:\n for n in numerator_data['items']:\n # TODO what should we do when a matching item isn't found?\n matching_d = next((item for item in denominator_data['items'] if\n item['group'] == n['group']),\n {'group': '_unknown', 'value': None})\n if matching_d['value'] is None or n['value'] is None:\n divided = None\n else:\n divided = n['value'] / matching_d['value']\n\n # item = dict({'group': n['group'],\n # 'value': divided})\n items[n['group']] = divided\n\n return {'items': items, 'grouping': numerator_data['grouping'],\n 'data_id': numerator_data['data_id']}", "def _split_by_keypair(self, osw_dict={}): \n lst = osw_dict\n keypair_dict = []\n for d in lst:\n if d['key'] == 'raw_line':\n keypair_lst = re.split(r',',d['value'])\n \n for k,v in keypair_lst:\n _d = [{'timestamp':d['timestamp'] , \n 'category': d['category'], \n 'sub_category': d['sub_category'], \n 'key': k, \n 'value': v}]\n keypair_dict.extend(_d)\n \n return keypair_dict", "def production_volumes_to_frame(data):\n #need to handle well and wellbores separately\n wells=[]\n other=[]\n for item in data['data']['production']['data']:\n if item['dataEntity']['type']=='wellbore' or item['dataEntity']['type']=='well':\n #just unpack measurements\n if item['wellMeasurements']!=None:\n for key in item['wellMeasurements'].keys():\n wellitem=item['wellMeasurements'][key]\n name='wellMeasurements.'+key\n value=''\n uom=''\n if len(wellitem)>0:\n value=wellitem[0]['value']\n uom=wellitem[0]['uom']\n item[name+\".value\"]=value \n item[name+\".uom\"]=uom \n #null out the wellmeasurements\n item['wellMeasurements']={}\n wells.append(item)\n else:\n other.append(item)\n\n result_wells=json_normalize(wells)\n result_others=json_normalize(other)\n #concat the 2 frames into one...\n result=pd.concat([result_wells,result_others])\n return result", "def transform_city_dataframes(filled_frames, ttype=[0]):\n assert isinstance(ttype, list)\n assert all(isinstance(obj, int) for obj in ttype)\n assert isinstance(filled_frames, dict)\n mdf = filled_frames\n # Modified dataframe\n for tix, ty in enumerate(ttype):\n if ty == 0:\n # For per 1000 people calculation, value/pop*1000\n for state in mdf.keys():\n for city, df in enumerate(mdf[state]):\n df.columns = col_index_names1000\n 
df.iloc[:, 1:] = df.iloc[:, 1:].div(1/1000).div(\n df.iloc[:, 0], axis='index')\n if ty == 1:\n for state in mdf.keys():\n for city, df in enumerate(mdf[state]):\n df.columns = col_index_names_p\n df.iloc[:, 1:] = df.iloc[:, 1:].div(\n df.iloc[:, 0], axis='index')\n yield mdf", "def toDataFrame(self, split=True):\n\n def cleanColumns(df):\n # Cleanup columns\n colnames = df.columns\n colnames=[c.replace('\\'','') for c in colnames]\n colnames=[c[1:] if c.startswith('/') else c for c in colnames]\n # If there is only one group, we remove the group key\n groupNames = self.groupNames\n if len(groupNames)==1:\n nChar = len(groupNames[0])\n colnames=[c[nChar+1:] for c in colnames] # +1 for the \"/\"\n df.columns = colnames\n\n fh = self['data']\n if split:\n # --- One dataframe per group. We skip group that have empty data\n dfs={}\n for group in fh.groups():\n try:\n df = group.as_dataframe(time_index=True)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = group.as_dataframe(time_index=False)\n if len(df)>0:\n dfs[group.name] = df\n if len(dfs)==1:\n dfs=dfs[group.name]\n return dfs\n else:\n # --- One dataframe with all data\n try:\n df = fh.as_dataframe(time_index=True)\n cleanColumns(df)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = fh.as_dataframe(time_index=False)\n return df", "def create_floors_from_data(self, data, locations):\n item_dict = {}\n for (item_name, data) in data.items():\n item_dict[item_name] = Floor(\n number=data['number'],\n location=locations.get(data['location']))\n item_dict[item_name].save()\n return item_dict", "def makePairedDataForViSDEM(visdem_data,path,dict):\n \n #print dict\n #print visdem_data\n # 1. Restrict data to only one type of observer color deficiency\n obs_groups = dict['obs_groups']\n #print\n #coldef_type = dict['coldef_type']\n #print coldef_type\n #observer_coldef_type = dict['observer_coldef_type']\n \n \n #print obs_ids_sheet\n \n \n visdem_data_restr = pandas.DataFrame()\n for group_coldef_type, value in obs_groups.items():\n whatArr_tmp = [['observer_coldef_type',operator.eq,group_coldef_type],['variant_id',operator.eq,1]]\n visdem_data_restr_tmp = organizeArray(visdem_data,whatArr_tmp)\n visdem_data_restr = pandas.concat([visdem_data_restr_tmp,visdem_data_restr])\n visdem_data_restr.reset_index()\n\n dalt_ids = sorted(set(visdem_data_restr['dalt_id'].values.astype(int)))\n observer_ids = sorted(set(visdem_data_restr['observer_id'].values.astype(int)))\n \n columns=['observer_id', 'observer_coldef_type', 'coldef_type', 'set_id', 'motive_id', 'image_id', 'variant_id']\n for dalt_id in dalt_ids:\n col_tmp = \"dalt_id_\"+str(dalt_id).zfill(2)\n columns.append(col_tmp)\n \n #observer_coldef_types = set(visdem_data_restr['observer_coldef_type'].values)\n #print observer_coldef_types\n \n visdem_data_template = pandas.DataFrame(columns=columns)\n visdem_data_RT_paired = visdem_data_template.copy()\n visdem_data_ACC_paired = visdem_data_template.copy()\n index = 0\n\n for observer_id in observer_ids:\n whatArray_tmp = [['observer_id',operator.eq,observer_id]]\n visdem_data_restr_obs = organizeArray(visdem_data_restr, whatArray_tmp)\n \n obs_coldef_type = getColDefTypeForObserver(observer_id)\n ass_coldef_type = obs_groups[obs_coldef_type] # assigned coldef tyoe for particular observer group\n #print observer_id, obs_coldef_type, ass_coldef_type\n \n set_ids = sorted(set(visdem_data_restr_obs['set_id'].values.astype(int)))\n \n for set_id in 
set_ids:\n whatArray_tmp = [['set_id', operator.eq, set_id]]\n visdem_data_restr_set = organizeArray(visdem_data_restr_obs, whatArray_tmp)\n motive_ids = sorted(set(visdem_data_restr_set['motive_id'].values.astype(int)))\n\n \n for motive_id in motive_ids:\n whatArray_tmp = [['motive_id',operator.eq,motive_id],['variant_id', operator.eq,1]]\n visdem_data_restr_motive =organizeArray(visdem_data_restr_set, whatArray_tmp)\n \n pandas_tmp = pandas.DataFrame({'observer_id': observer_id,\n 'observer_coldef_type': obs_coldef_type,\n 'coldef_type': ass_coldef_type,\n 'set_id': set_id,\n 'motive_id': motive_id,\n 'image_id': 'nix',\n 'variant_id': 'nix',\n 'dalt_id': 'nix'\n },[index])\n pandas_RT_tmp = pandas_tmp.copy(); pandas_ACC_tmp = pandas_tmp.copy()\n \n tmp_cdt = 'a'; tmp_var = 'v'\n for dalt_id in dalt_ids:\n if dalt_id not in [0,99]: whatArray_tmp = [['dalt_id',operator.eq,dalt_id],['coldef_type',operator.eq,ass_coldef_type]]\n else: whatArray_tmp = [['dalt_id',operator.eq,dalt_id]]\n \n field = organizeArray(visdem_data_restr_motive, whatArray_tmp).reset_index().loc[0]\n \n if not field.empty:\n RT_tmp = field['resp_time']*1000 if bool(field['is_correct']) else float('NaN')\n ACC_tmp = field['is_correct']\n \n image_id = field['image_id']; pandas_RT_tmp['image_id'] = image_id; pandas_ACC_tmp['image_id'] = image_id\n tmp_var += str(field['variant_id'])\n tmp_cdt += str(field['coldef_type'])\n else: RT_tmp = float('NaN'); ACC_tmp = float('NaN')\n \n pandas_tmp['coldef_type'] = str(tmp_cdt); pandas_tmp['variant_id'] = str(tmp_var)\n \n pandas_RT_tmp[\"dalt_id_\"+str(dalt_id).zfill(2)]= float(RT_tmp)\n pandas_ACC_tmp[\"dalt_id_\"+str(dalt_id).zfill(2)]= ACC_tmp\n \n \n visdem_data_RT_paired = visdem_data_RT_paired.append(pandas_RT_tmp)\n visdem_data_ACC_paired = visdem_data_ACC_paired.append(pandas_ACC_tmp)\n index += 1\n \n # Layout RT for storage in path\n visdem_data_RT_paired = visdem_data_RT_paired[columns]\n visdem_data_RT_paired.observer_id = visdem_data_RT_paired.observer_id.astype(int)\n visdem_data_RT_paired.observer_coldef_type = visdem_data_RT_paired.observer_coldef_type.astype(int)\n visdem_data_RT_paired.set_id = visdem_data_RT_paired.set_id.astype(int)\n visdem_data_RT_paired.motive_id = visdem_data_RT_paired.motive_id.astype(int)\n visdem_data_RT_paired.image_id = visdem_data_RT_paired.image_id.astype(int)\n visdem_data_RT_paired.coldef_type = visdem_data_RT_paired.coldef_type.astype(str)\n visdem_data_RT_paired.to_csv(os.path.join(path,dict['filename']+'_visdem-data-RT-paired.csv'),sep=\";\")\n \n # Layout ACC for storage in path\n visdem_data_ACC_paired = visdem_data_ACC_paired[columns]\n visdem_data_ACC_paired.observer_id = visdem_data_ACC_paired.observer_id.astype(int)\n visdem_data_ACC_paired.observer_coldef_type = visdem_data_ACC_paired.observer_coldef_type.astype(int)\n visdem_data_ACC_paired.set_id = visdem_data_ACC_paired.set_id.astype(int)\n visdem_data_ACC_paired.motive_id = visdem_data_ACC_paired.motive_id.astype(int)\n visdem_data_ACC_paired.image_id = visdem_data_ACC_paired.image_id.astype(int)\n visdem_data_ACC_paired.coldef_type = visdem_data_ACC_paired.coldef_type.astype(str)\n visdem_data_ACC_paired.to_csv(os.path.join(path,dict['filename']+'_visdem-data-ACC-paired.csv'),sep=\";\")\n \n f = open(os.path.join(path,dict['filename']+'_visdem-data-paired_meta-data.txt'), 'w')\n json.dump(dalt_ids, f); f.close()", "def split_train_dev_set(df, percent=0.2):\n train = []\n dev = []\n for k, g in df.groupby(\"sender\")[\"mid\", \"recipients\"]:\n n_msg = 
g.shape[0]\n n_dev = int(n_msg * percent)\n g = g.sort_values(\"date\")\n g_train = g[:-n_dev]\n g_dev = g[-n_dev:]\n train.append(g_train)\n dev.append(g_dev)\n # concat all dataframe\n df_train = pd.concat(train, axis=0).sort_index()\n df_dev = pd.concat(dev, axis=0).sort_index()\n return df_train, df_dev", "def prepare_data(base_df, n_seconds_min=3):\n # Remove too short samples\n source_df = base_df.loc[base_df['seconds'] > n_seconds_min]\n # Group speakers duplicated by id\n df = source_df.loc[:, ['speaker_id', 'dataset_name']]\n df = df.set_index('speaker_id')\n df = df.loc[~df.index.duplicated(keep='first')]\n dfGrouped = source_df.groupby(['speaker_id']).sum()\n # Count the number of samples for each speaker\n dfCountAudio = source_df.groupby(['speaker_id']).count().filepath\n speakers_duration = dfGrouped.join(df)\n speakers_duration = speakers_duration.join(dfCountAudio)\n speakers_duration = speakers_duration.rename(columns={'filepath': 'n_samples'})\n return source_df, speakers_duration", "def classify_df(cls, data):\n\t\tif isinstance(data, pd.DataFrame) == False:\n\t\t\traise Exception(\"data must be pandas.Dataframe\")\n\t\t#get unique atom_type id and sorting\n\t\tunique_atom_type = sorted(data[\"atom_id\"].unique())\n\t\t# find the subset dataframe for each atom_type\n\t\t# put their into a dictionary\n\t\t# tuple pair key, val in .items() might be useful\n\t\tgroups = dict()\n\t\tfor i in unique_atom_type:\n\t\t\tgroups[i] = data.loc[data[\"atom_id\"] == i]\n\t\treturn groups", "def parse_data(data):\n parsed_data = {}\n for i, chunk in enumerate(re.split(r'\\n{2,}', data)):\n if i == 0:\n match = re.search(r'^(.*?) interest: (.*)\\n(.*?); (.*?)$', chunk)\n if match:\n source, query, geo, period = match.groups()\n parsed_data['info'] = {'source': source, 'query': query,\n 'geo': geo, 'period': period}\n else:\n chunk = _clean_subtable(chunk)\n rows = [row for row in csv.reader(StringIO(chunk)) if row]\n if not rows:\n continue\n label, parsed_rows = _parse_rows(rows)\n if label in parsed_data:\n parsed_data[label+'_1'] = parsed_data.pop(label)\n parsed_data[label+'_2'] = parsed_rows\n else:\n parsed_data[label] = parsed_rows\n\n return parsed_data", "def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], js.flatten()[idx]))))\n return out", "def get_pump_times(self, start):\n pumps_dict = {}\n for pump in self.pumps:\n dataframe_ = pd.DataFrame()\n time = []\n command = []\n for i in range(len(pump.start_intervals)):\n t_on = pump.start_intervals[i].epanet_on_time\n t_off = pump.start_intervals[i].epanet_off_time\n time += [start + t_on * pd.Timedelta(\"1S\"),\n start + t_off * pd.Timedelta(\"1S\")]\n command += [1, 0]\n dataframe_['Time'] = time\n dataframe_[pump.link_id] = command\n pumps_dict[pump.link_id] = dataframe_\n return pumps_dict", "def _get_data(self):\n\n data = self.get_data()\n\n required_data = ['open','close','open_date','high','low']\n if not np.isin(required_data, data.columns).all():\n raise ImplementationError(f'''\n Data must contain columns: {required_data}\n ''')\n\n data = data.sort_values('open_date')\n data.index = data.open_date\n\n temp_dates = pd.unique(data.open_date)\n self.total_candles = len(temp_dates)\n self.start_date, self.end_date = min(temp_dates), max(temp_dates)\n\n # Divide df based on symbol, create DataEngine object, add to dict.\n 
data_dict = {}\n for symbol in self.symbols.symbol:\n try:\n data_dict[symbol] = DataEngine(data[data.symbol == symbol])\n except DiscontinuousError as err:\n print(f'There are missing dates in data for {symbol}')\n raise err\n except ValueError as err:\n print(f'No data for provided for symbol: {symbol}')\n self.symbols = self.symbols.drop(symbol)\n\n return data_dict", "def outcome_split(df,outcome_dict={\n 'Good':['To Home','No Reason Given','Assissted Living Facility','No Reason Given'], # CAN WE ASSUME THIS??? that In Nursing Facility\n 'Bad':['Hospital','Death'],\n 'Test':['In Nursing Facility','Skilled Nursing Facility (SNF)',\n 'Not approriate for program, removed']}):\n outcome={}\n train={}\n for row in range(df.shape[0]):\n if df.iloc[row]['status'] in outcome_dict['Good']:\n outcome[df.iloc[row]['patient_link']]=1\n train[df.iloc[row]['patient_link']]=1\n if df.iloc[row]['status'] in outcome_dict['Bad']:\n outcome[df.iloc[row]['patient_link']]=0\n train[df.iloc[row]['patient_link']]=1\n if df.iloc[row]['status'] in outcome_dict['Test']:\n train[df.iloc[row]['patient_link']]=0\n elif df.iloc[row]['discharge']==True:\n train[df.iloc[row]['patient_link']]=1\n elif df.iloc[row]['discharge']==False:\n train[df.iloc[row]['patient_link']]=0\n df['outcome']=df['patient_link'].map(outcome)\n df['train']=df['patient_link'].map(train)\n return df", "def split_data(df_data, clusters):\n\n if clusters is None:\n\n return {0: df_data}\n\n return {\n k: df_data.loc[clusters.index[clusters == k]]\n for k in clusters.unique()\n }", "def extract_sensors_data(dataframe, ms_column='ms_ticker',\n time_column = 'Tstamp',\n ppg_columns=['led_1', 'led_2'],\n acc_columns=['acc_x', 'acc_y', 'acc_z']):\n\n sensors_dict = {}\n sensors = dataframe.loc[1:, 1:]\n sensors_columns = dataframe.head(1).values[0]\n sensors_columns = [i.replace(\" \", \"\") for i in sensors_columns if i.find('Index') == -1]\n sensors.columns = sensors_columns\n check_columns_exist(ppg_columns, sensors_columns)\n check_columns_exist(acc_columns, sensors_columns)\n check_columns_exist(ms_column, sensors_columns)\n check_columns_exist(time_column, sensors_columns)\n ppg = np.array(sensors[ppg_columns].values[1:, :], dtype=int)\n ms = np.array(sensors[ms_column].values[1:, ])\n ms_ints = np.array([int(str(i)[-3:]) for i in ms], dtype=float)\n ms_delta = [datetime.timedelta(milliseconds=i) for i in ms_ints]\n\n time = dataframe.loc[:,1].values[1:]\n time = np.array([pd.to_datetime(i) for i in time])\n time_with_ms = np.array(ms_delta) + time\n\n sensors_dict['PPG'] = ppg\n sensors_dict['time_sensors'] = time_with_ms.astype('datetime64[us]')\n sensors_dict['ms_ticker_sensors'] = ms\n acc = np.array(sensors[acc_columns].values[1:, :], dtype=float)\n sensors_dict['ACC'] = acc\n\n return sensors_dict", "def _split_into_categories(data_struct):\n data_names = [\"left_x\",\"top_y\",\"width\",\"height\",\"FPS\",\"AVG_FPS\",\"Accuracy\"]\n groups = {}\n\n for cat in set(data_struct[\"Objects\"]): \n indices = [i for i, x in enumerate(data_struct[\"Objects\"]) if x == cat]\n mask = []\n mask = np.empty((len(indices),len(data_names)))\n\n for counter,value in enumerate(data_names):\n mask[:,counter] = np.array(data_struct[value])[indices]\n\n groups[cat] = mask\n \n return(groups,data_names)", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': 
[], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def clean_data(raw_data, names=const.NAMES, meter_map=const.METER_MAP):\n\n cleaned_data = {}\n local_names = names.copy()\n if 'building_metadata' in local_names:\n local_names.remove('building_metadata')\n\n for name in local_names:\n print(f'Cleaning {name} dataset')\n df = raw_data[name]\n df.timestamp = pd.to_datetime(df.timestamp)\n if name.startswith('weather'):\n df = add_missing_weather_data(df)\n elif name in ['train', 'test']:\n df['meter_type'] = df['meter'].map(meter_map)\n cleaned_data[name] = df\n\n cleaned_data['building_metadata'] = raw_data['building_metadata']\n\n return cleaned_data", "def fetch_propagation_data(observer_stats):\n columns = [\n Observer.TABLE.c.start_time,\n Observer.TABLE.c.duration,\n Observer.TABLE.c.type,\n Observer.TABLE.c.status,\n Observer.TABLE.c.nameserver,\n ]\n query = select(columns).where(\n and_(Observer.TABLE.c.start_time >= observer_stats.start,\n Observer.TABLE.c.start_time <= observer_stats.end)\n )\n result = get_engine().execute(query)\n\n data = {\n 'by_type': {},\n 'by_nameserver': {},\n }\n for row in result:\n start_time, duration, type, status, nameserver = row\n if type not in data['by_type']:\n data['by_type'][type] = {\n 'error': [],\n 'success': [],\n }\n if nameserver not in data['by_nameserver']:\n data['by_nameserver'][nameserver] = {\n 'error': [],\n 'success': [],\n }\n datapoint = (start_time, duration)\n if status == Observer.STATUSES.COMPLETE:\n data['by_type'][type]['success'].append(datapoint)\n data['by_nameserver'][nameserver]['success'].append(datapoint)\n else:\n data['by_type'][type]['error'].append(datapoint)\n data['by_nameserver'][nameserver]['error'].append(datapoint)\n return data", "def _get_gedi2a_main_data_dict(self) -> dict:\n gedi_l2a_count_start = pd.to_datetime(\"2018-01-01T00:00:00Z\")\n data = {\n # General identifiable 
data\n \"granule_name\": [self.parent_granule.filename] * self.n_shots,\n \"shot_number\": self[\"shot_number\"][:],\n \"beam_type\": [self.beam_type] * self.n_shots,\n \"beam_name\": [self.name] * self.n_shots,\n # Temporal data\n \"delta_time\": self[\"delta_time\"][:],\n \"absolute_time\": (\n gedi_l2a_count_start\n + pd.to_timedelta(list(self[\"delta_time\"]), unit=\"seconds\")\n ),\n # Quality data\n \"sensitivity\": self[\"sensitivity\"][:],\n \"quality_flag\": self[\"quality_flag\"][:],\n \"solar_elevation\": self[\"solar_elevation\"][:],\n \"solar_azimuth\": self[\"solar_elevation\"][:],\n \"energy_total\": self[\"energy_total\"][:],\n # DEM\n \"dem_tandemx\": self[\"digital_elevation_model\"][:],\n \"dem_srtm\": self[\"digital_elevation_model_srtm\"][:],\n # Processing data\n \"selected_algorithm\": self[\"selected_algorithm\"][:],\n \"selected_mode\": self[\"selected_mode\"][:],\n # Geolocation data\n \"lon_lowestmode\": self[\"lon_lowestmode\"][:],\n \"longitude_bin0_error\": self[\"longitude_bin0_error\"][:],\n \"lat_lowestmode\": self[\"lat_lowestmode\"][:],\n \"latitude_bin0_error\": self[\"latitude_bin0_error\"][:],\n \"elev_lowestmode\": self[\"elev_lowestmode\"][:],\n \"elevation_bin0_error\": self[\"elevation_bin0_error\"][:],\n \"lon_highestreturn\": self[\"lon_highestreturn\"][:],\n \"lat_highestreturn\": self[\"lat_highestreturn\"][:],\n \"elev_highestreturn\": self[\"elev_highestreturn\"][:],\n } | {f\"rh{i}\": self[\"rh\"][:, i] for i in range(101)}\n return data", "def metro_phil_to_basis_dict(metro):\n for o in metro.objects:\n if o.is_scope:\n #one of the subkeys of the root object will be the detector phil. it will be the only one not extracted.\n detector_phil = o.extract()\n break\n #metro = metro.extract() # not needed\n\n bd = {(detector_phil.serial,): basis(matrix.col(detector_phil.orientation),\n matrix.col(detector_phil.translation)*1000) }\n for p in detector_phil.panel:\n bd[(detector_phil.serial,p.serial)] = basis(matrix.col(p.orientation),\n matrix.col(p.translation)*1000)\n for s in p.sensor:\n bd[(detector_phil.serial,p.serial,s.serial)] = basis(matrix.col(s.orientation),\n matrix.col(s.translation)*1000)\n for a in s.asic:\n bd[(detector_phil.serial,p.serial,s.serial,a.serial)] = basis(matrix.col(a.orientation),\n matrix.col(a.translation)*1000)\n\n return bd", "def create_frames_from_data(self, data, blocks):\n item_dict = {}\n for (item_name, data) in data.items():\n item_dict[item_name] = MeasurementFrame(\n name=data['name'],\n pixel_pose_x=data['pixel_pose_x'],\n pixel_pose_y=data['pixel_pose_y'],\n pixel_pose_theta=data['pixel_pose_theta'],\n block=blocks.get(data['block']))\n item_dict[item_name].save()\n return item_dict", "def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]", "def split_data(self):\n if self.seper == \"whitespace\":\n seperstring = \" \"\n elif self.seper == \"comma\":\n seperstring = \",\"\n elif 
self.seper == \"tab\":\n seperstring = \"\\t\"\n else:\n print self.seper + \"is not a supported delimiter. Only whitespace, comma, and tab are accepted.\"\n sys.exit()\n f = open(self.filelocation, \"rb\")\n temp = list()\n for line in f.readlines():\n temp.append(line.replace(\"\\n\", \"\").split(seperstring))\n f.close()\n data = dict()\n count = 0\n for row in temp:\n data[count] = list()\n for each in row:\n if each is not \"\":\n data[count].append(each)\n count += 1\n return data, count", "def data(self):\n return pandas.concat([i.data for i in list(self.plates.values())])", "def _process_data(data, band):\n\n meta = {key:value for key,value in data[0].items() if key != \"subset\" }\n meta['band'] = band\n data_dict = {'dates': [], 'arrays': [], 'metadata': meta}\n for i in data:\n for j in i['subset']:\n if j['band'] == band:\n data_dict['dates'].append(j['calendar_date'])\n data = []\n for x in j['data']:\n try:\n data.append(float(x))\n except ValueError:\n data.append(np.nan) \n data_dict['arrays'].append(np.array(data).reshape(meta['nrows'], \n meta['ncols'])) \n dtdates = [dt.datetime.strptime(d,\"%Y-%m-%d\") for d in data_dict['dates']]\n xcoordinates = ([float(meta['xllcorner'])] + \n [i * meta['cellsize'] + float(meta['xllcorner']) \n for i in range(1, meta['ncols'])])\n ycoordinates = ([float(meta['yllcorner'])] + \n [i * meta['cellsize'] + float(meta['yllcorner'])\n for i in range(1, meta['nrows'])])\n return xr.DataArray(name = band,\n data = np.flipud(np.dstack(data_dict['arrays'])),\n coords = [np.array(ycoordinates), \n np.array(xcoordinates), dtdates],\n dims = [ \"y\", \"x\", \"time\" ],\n attrs = meta)", "def instruments_with_meta_data(self):\n if len(self._instruments_with_meta_data) > 0:\n return self._instruments_with_meta_data\n else:\n self._borsdata_api = BorsdataAPI(constants.API_KEY)\n # fetching data from api\n countries = self._borsdata_api.get_countries()\n branches = self._borsdata_api.get_branches()\n sectors = self._borsdata_api.get_sectors()\n markets = self._borsdata_api.get_markets()\n instruments = self._borsdata_api.get_instruments()\n # instrument type dict for conversion (https://github.com/Borsdata-Sweden/API/wiki/Instruments)\n instrument_type_dict = {0: 'Aktie', 1: 'Pref', 2: 'Index', 3: 'Stocks2', 4: 'SectorIndex', 5: 'BranschIndex'}\n # creating an empty dataframe\n instrument_df = pd.DataFrame()\n # loop through the whole dataframe (table) i.e. row-wise-iteration.\n for index, instrument in instruments.iterrows():\n name = instrument['name']\n ins_id = instrument['insId']\n ticker = instrument['ticker']\n isin = instrument['isin']\n # locating meta-data in various ways\n # dictionary-lookup\n instrument_type = instrument_type_dict[instrument['instrument']]\n # .loc locates the rows where the criteria (inside the brackets, []) is fulfilled\n # located rows (should be only one) get the column 'name' and return its value-array\n # take the first value in that array ([0], should be only one value)\n market = markets.loc[markets['id'] == instrument['marketId']]['name'].values[0]\n country = countries.loc[countries['id'] == instrument['countryId']]['name'].values[0]\n sector = 'N/A'\n branch = 'N/A'\n # index-typed instruments does not have a sector or branch\n if market.lower() != 'index':\n sector = sectors.loc[sectors['id'] == instrument['sectorId']]['name'].values[0]\n branch = branches.loc[branches['id'] == instrument['branchId']]['name'].values[0]\n # appending current data to dataframe, i.e. 
adding a row to the table.\n instrument_df = instrument_df.append({'name': name, 'ins_id': ins_id, 'ticker': ticker, 'isin': isin, 'instrument_type': instrument_type,\n 'market': market, 'country': country, 'sector': sector, 'branch': branch}, ignore_index=True)\n # create directory if it do not exist\n if not os.path.exists(constants.EXPORT_PATH):\n os.makedirs(constants.EXPORT_PATH)\n # to csv\n instrument_df.to_csv(constants.EXPORT_PATH + 'instrument_with_meta_data.csv')\n # creating excel-document\n excel_writer = pd.ExcelWriter(constants.EXPORT_PATH + 'instrument_with_meta_data.xlsx')\n # adding one sheet\n instrument_df.to_excel(excel_writer, 'instruments_with_meta_data')\n # saving the document\n excel_writer.save()\n self._instruments_with_meta_data = instrument_df\n return instrument_df", "def get_outdoor_data(temp_dir,site):\n if site == 'berk':\n files_od = glob(join(temp_dir,'outdoor','20*.xlsx'))\n elif site == 'bus':\n files_od = glob(join(temp_dir,'outdoor','Busara*.csv'))\n else:\n raise NameError(site)\n\n dfs = []\n for f in files_od:\n if site == 'berk':\n this_df = pd.read_excel(f,sheet_name=0,usecols='B:D',index_col=0,parse_dates=True, header=1)\n elif site == 'bus':\n this_df = pd.read_csv(f,usecols=[0,1,2],index_col=0,parse_dates=True,header=2)\n \n # drop missing values that prevented conversion to float type\n if this_df.iloc[:,0].dtype != np.float64:\n this_df = this_df[this_df.iloc[:,0] != ' ']\n this_df = this_df.astype(np.float64)\n\n # correct for weird timezones in berkeley datalogger\n this_df = correct_tz(this_df,site)\n \n this_df.columns = ['T','RH']\n this_df.index.name = 'time'\n\n # convert to celsius\n this_df['T'] = (this_df['T'] - 32) * 5/9\n dfs.append(this_df)\n \n df_od = pd.concat(dfs)\n\n # drop duplicated measurements\n df_od = df_od[~df_od.index.duplicated(keep='last')].sort_index()\n \n # separate out into daily min,mean,max\n groups = df_od.groupby(df_od.index.date)\n dfs_od = {'all':df_od,\n 'min': groups.min(),\n 'mean': groups.mean(),\n 'max': groups.max()}\n \n for i in ['min','mean','max']:\n # remove first and last day to ignore days where we did not get full recording\n dfs_od[i] = dfs_od[i].iloc[1:-1,:]\n \n # name index so that we can merge onto multiIndex'd dataframe\n dfs_od[i].index.name = 'date'\n \n return dfs_od", "def split_data(\n data: pd.DataFrame,\n time_intervals: Union[pd.DataFrame, pd.Series, Dict[str, Sequence[str]]],\n include_start: Optional[bool] = False,\n) -> Dict[str, pd.DataFrame]:\n _assert_is_dtype(time_intervals, (pd.DataFrame, pd.Series, dict))\n\n if isinstance(time_intervals, pd.DataFrame):\n if len(time_intervals) > 1:\n raise ValueError(\"Only dataframes with 1 row allowed!\")\n time_intervals = time_intervals.iloc[0]\n\n if isinstance(time_intervals, pd.Series):\n time_intervals = _split_data_series(data, time_intervals, include_start)\n elif include_start:\n time_intervals[\"Start\"] = (\n data.index[0].to_pydatetime().time(),\n list(time_intervals.values())[0][0],\n )\n\n data_dict = {name: data.between_time(*start_end) for name, start_end in time_intervals.items()}\n data_dict = {name: data for name, data in data_dict.items() if not data.empty}\n return data_dict", "def merge_record(self, dt, container = ''): \n record_dataset_legth ={} \n \n \n \"\"\" Combining the ncar_t and ncar_w files.\n If both are present, select the ncar_t data and rename it as 'ncar'. \n If only one is present, simply rename it as 'ncar'. 
\n \"\"\" \n if ('ncar_t' in list(container.keys()) ):\n container['ncar'] = {} \n container['ncar']['df'] = container['ncar_t']['df'] \n \n elif ( 'ncar_w' in list(container.keys()) and 'ncar_t' not in list(container.keys()) ) :\n container['ncar'] = {} \n container['ncar']['df'] = container['ncar_w']['df'] \n\n \n for k in container.keys():\n if k == 'ncar_t' or k == 'ncar_w': \n continue \n record_dataset_legth[k] = len(container[k]['df'] )\n \n \n \"\"\" For now, choosing the dataset with more records of all or igra2>ncar>rest data if available and with same number of records \"\"\"\n best_ds, all_ds , best_datasets, all_ds_reports = 'dummy' , [] , [], [] # total number of records, name of the chosen dataset , list of other possible dataset with available data \n \n most_records = max( [ v for v in record_dataset_legth.values() ] ) # maximum number of records per date_time \n \n for k, v in record_dataset_legth.items(): \n if v == 0:\n continue\n if v == most_records:\n best_datasets.append(k) \n if v > 0:\n all_ds.append(k) # all other datasets with smaller number of records than the maximum found\n try: \n all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + container[k]['df']['report_id'].values[0] ) # converting the original report id using the same convention as for observation_id\n except:\n all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + int( (container[k]['df']['report_id'].values[0]).tostring() ) ) # converting the original report id using the same convention as for observation_id\n \n \n #all_ds_reports.append(np.nan)\n #print ( type(container[k]['df']['report_id'].values) )\n #all_ds_reports.append( self.observation_ids_merged[k] * 1000000000 + float(container[k]['df']['report_id'].values[0].decode('latin1') ))\n \n if len(best_datasets) ==0:\n print('wrong??? please check')\n return 0,0,0,0 \n \n if 'igra2' in best_datasets:\n best_ds = 'igra2'\n elif 'ncar' in best_datasets:\n best_ds = 'ncar'\n elif 'era5_1' in best_datasets:\n best_ds = 'era5_1' \n else:\n best_ds = best_datasets[0]\n \n \"\"\" Extract container \"\"\" \n selected_df = container[best_ds]['df'].copy(deep = True) # might take extra time, dont know how to get rid of this \n\n try:\n merged_report = self.observation_ids_merged[best_ds] * 1000000000 + int( selected_df['report_id'].values[0].tostring() ) \n except:\n merged_report = np.nan \n\n \"\"\" Calculate new unique observation id \"\"\"\n try: \n obs_ids_merged = [ self.observation_ids_merged[best_ds] * 1000000000 + int( i.tostring() ) for i in selected_df['observation_id'] ]\n except:\n obs_ids_merged = [ np.nan for i in selected_df['observation_id'] ]\n \n \n selected_df['observation_id'] = obs_ids_merged\n \n \"\"\" Calculate new unique report id \"\"\" \n selected_df['report_id'] = merged_report\n\n \"\"\" Returning a string with the alternative available datasets data \"\"\"\n if len(all_ds_reports) > 1: \n duplicates = \",\".join( [ str(i) for i in all_ds_reports] )\n else:\n duplicates = str(all_ds_reports[0])\n \n \n \"\"\" Extracting the merged header_table.\n Again, must consider the special case where best_ds == ncar. 
\n Note that the header table *should* be identical for ncar_w or ncar_t \"\"\" \n if best_ds != 'ncar':\n header = self.get_header_table(dt, ds= best_ds, all_ds = duplicates , length= len(selected_df) )\n \n elif ( best_ds == 'ncar' and 'ncar_t' in list(container.keys()) ) :\n header = self.get_header_table(dt, ds = 'ncar_t', all_ds = duplicates, length= len(selected_df))\n \n elif ( best_ds == 'ncar' and 'ncar_t' not in list(container.keys()) ) :\n header = self.get_header_table(dt, ds = 'ncar_w', all_ds = duplicates, length= len(selected_df) ) \n \n logging.debug('I use %s record since it has more entries: %s but other available datasets are : %s' , best_ds , str(most_records) , all_ds ) \n \n #print ('duplicates are: ', duplicates)\n return selected_df, best_ds , duplicates, header", "def _gather_deep_data(self):\n\n cleaned_data_from_website = list()\n\n for i, search_result in self.data_from_website.iterrows():\n cleaned_data_from_website.append(self._deep_data(search_result.url))\n\n cleaned_data_from_website = pd.DataFrame(cleaned_data_from_website)\n if len(cleaned_data_from_website) == 0:\n cleaned_data_from_website['@id'] = '0'\n cleaned_data_from_website.set_index('@id', inplace=True)\n self.data_from_website = cleaned_data_from_website", "def process_data(data):\n # set up an OrderedDict to hold the data with initial data set to 0\n output = OrderedDict((s, {'calls': [], 'out_count': 0, 'out_dur':\n datetime.timedelta(seconds=0), 'in_count': 0, 'in_dur':\n datetime.timedelta(seconds=0), 'total_dur':\n datetime.timedelta(seconds=0)}) for s in config.SALESPEOPLE)\n for d in data:\n # assume it's an outgoing call.\n call_type = 'outgoing'\n key = d['From']\n tofrom = '{0} {1}'.format(d['To CID Name'], d['To CID Number'])\n if not tofrom.strip():\n tofrom = d['Digits'].strip() or d['To']\n out_count, in_count = 1, 0\n dur_h, dur_m, dur_s = map(int, d['Duration'].split(':'))\n duration = datetime.timedelta(hours=dur_h, minutes=dur_m,\n seconds=dur_s)\n out_dur = duration\n in_dur = datetime.timedelta(seconds=0)\n if key not in config.SALESPEOPLE:\n # it's an incoming call if the From name isn't one of the\n # config.SALESPEOPLE. 
Adjust the data accordingly\n call_type = 'incoming'\n key = d['To']\n tofrom = '{0} {1}'.format(d['From CID Name'], d['From CID Number'])\n if not tofrom.strip():\n tofrom = d['To']\n out_count, in_count = 0, 1\n out_dur = datetime.timedelta(seconds=0)\n in_dur = duration\n\n # format the phone numbers\n tofrom = re.sub(r'1?(\\d{3})(\\d{3})(\\d{4})$', r'\\1-\\2-\\3', tofrom)\n\n output[key]['calls'].append({'time': d['Start Time'], 'type':\n call_type, 'duration': d['Duration'], 'tofrom': tofrom})\n output[key]['out_count'] = output[key]['out_count'] + out_count\n output[key]['out_dur'] = output[key]['out_dur'] + out_dur\n output[key]['in_count'] = output[key]['in_count'] + in_count\n output[key]['in_dur'] = output[key]['in_dur'] + in_dur\n output[key]['total_dur'] = output[key]['total_dur'] + duration\n\n return output", "def crawl_data(self, data_type):\n i = 0\n cat = {}\n prod = {}\n for term in self.search_response:\n if data_type == 'category' and term['products'] > 6000:\n i += 1\n cat[i] = {\"name\": term['name']}\n self.categories.append(cat[i])\n elif data_type == 'product':\n nutrigrade = \"\".join(term[\"nutrition_grades_tags\"])\n if nutrigrade in (\"a\", \"b\", \"c\", \"d\", \"e\"):\n i += 1\n prod[i] = {\"name\": term['product_name_fr'], \"url\": term['url'], \"desc\": term['generic_name_fr'],\n \"brand\": term['brands'], \"categories\": term['categories'], \"store\": term['stores'],\n \"nutriscore\": nutrigrade}\n self.products.append(prod[i])\n else:\n pass", "def __init__(self, mtype=None):\n self.data = pd.DataFrame()\n self.fields = list()\n\n # Set data type\n if mtype not in DataContainer.mergeTypes and mtype is not None:\n raise exceptions.TypeError\n else:\n self.mtype = mtype", "def get_pars_df(plan_type, stop=False):\n df = pd.concat([get_plan_pars(patient, plan_type, stop)\n for patient in patients])\n return df", "def readmeter(self):\n # Read until /\n line = \"\"\n while not line.startswith(\"/\"):\n line = self.readline().decode()\n\n # Populate header\n header = line[1:].strip()\n\n # Skip empty line after header\n self.readline()\n\n # Read lines and populate dictionary until !\n data = {}\n line = self.readline().decode()\n while not line.startswith(\"!\"):\n # Get OBIS\n next_obis = line[:line.index(\"(\")]\n if next_obis:\n obis = next_obis\n data[obis] = []\n # Get and loop over the arguments\n args = re.findall(\"\\(([^()]*)\\)\", line)\n for arg in args:\n # Do some basic conversions\n valwithunit = re.match(\"^([0-9.]+)\\*([a-zA-Z]+)$\", arg)\n if valwithunit:\n arg = float(valwithunit[1]), valwithunit[2]\n # Save argument with corresponding OBIS\n data[obis].append(arg)\n line = self.readline().decode()\n return header, data", "def disaggregate(self, mains, output_datastore):\n \n building_path = '/building{}'.format(mains.building())\n # only writes one appliance and meter per building\n meter_instance = 2\n mains_data_location = '{}/elec/meter1'.format(building_path)\n \n #dis_main = pd.DataFrame()\n chunk_number = 0\n timeframes = []\n\n for chunk in mains.power_series():\n \n # Record metadata\n timeframes.append(chunk.timeframe)\n measurement = chunk.name\n cols = pd.MultiIndex.from_tuples([chunk.name])\n \n dis_chunk = self.disaggregate_chunk(\n pd.DataFrame(chunk.resample(self.sample_period, how=self.sampling_method)))\n #dis_main = pd.concat([dis_main, dis_chunk])\n chunk_number += 1\n print(str(chunk_number) + \" chunks disaggregated\")\n \n # Write appliance data to disag output\n key = '{}/elec/meter{}'.format(building_path, 
meter_instance)\n df = pd.DataFrame(\n dis_chunk.values, index=dis_chunk.index,\n columns=cols)\n output_datastore.append(key, df)\n\n # Copy mains data to disag output\n output_datastore.append(key=mains_data_location,\n value=pd.DataFrame(chunk, columns=cols))\n\n # Saving output datastore:\n #output_datastore.append(key=mains.key, value=dis_main)\n \n ##################################\n # Add metadata to output_datastore\n\n # TODO: `preprocessing_applied` for all meters\n # TODO: split this metadata code into a separate function\n # TODO: submeter measurement should probably be the mains\n # measurement we used to train on, not the mains measurement.\n \n date_now = datetime.now().isoformat().split('.')[0]\n output_name = 'NILMTK_MLE_' + date_now\n resample_seconds = 10\n mains_data_location = '{}/elec/meter1'.format(building_path)\n\n # DataSet and MeterDevice metadata:\n meter_devices = {\n 'MLE': {\n 'model': 'MLE',\n 'sample_period': resample_seconds,\n 'max_sample_period': resample_seconds,\n 'measurements': [{\n 'physical_quantity': measurement[0],\n 'type': measurement[1]\n }]\n },\n 'mains': {\n 'model': 'mains',\n 'sample_period': resample_seconds,\n 'max_sample_period': resample_seconds,\n 'measurements': [{\n 'physical_quantity': measurement[0],\n 'type': measurement[1]\n }]\n }\n }\n\n merged_timeframes = merge_timeframes(timeframes, gap=resample_seconds)\n total_timeframe = TimeFrame(merged_timeframes[0].start,\n merged_timeframes[-1].end)\n\n dataset_metadata = {'name': output_name, 'date': date_now,\n 'meter_devices': meter_devices,\n 'timeframe': total_timeframe.to_dict()}\n output_datastore.save_metadata('/', dataset_metadata)\n\n # Building metadata\n\n # Mains meter:\n elec_meters = {\n 1: {\n 'device_model': 'mains',\n 'site_meter': True,\n 'data_location': mains_data_location,\n 'preprocessing_applied': {}, # TODO\n 'statistics': {\n 'timeframe': total_timeframe.to_dict()\n }\n }\n }\n\n # Appliances and submeters:\n appliances = []\n appliance = {\n 'meters': [meter_instance],\n 'type': 'kettle',\n 'instance': 1\n # TODO this `instance` will only be correct when the\n # model is trained on the same house as it is tested on.\n # https://github.com/nilmtk/nilmtk/issues/194\n }\n appliances.append(appliance)\n\n elec_meters.update({\n meter_instance: {\n 'device_model': 'MLE',\n 'submeter_of': 1,\n 'data_location': ('{}/elec/meter{}'\n .format(building_path, meter_instance)),\n 'preprocessing_applied': {}, # TODO\n 'statistics': {\n 'timeframe': total_timeframe.to_dict()\n }\n }\n })\n elec_meters[meter_instance]['name'] = 'kettle'\n\n building_metadata = {\n 'instance': mains.building(),\n 'elec_meters': elec_meters,\n 'appliances': appliances\n }\n\n output_datastore.save_metadata(building_path, building_metadata)", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def merge_energy_datatypes(osm_path): \n #extract line data\n df_line = 
powerline_limited(osm_path) #extract required data\n if 'asset' in df_line.columns:\n df_line['asset'] = list(map(lambda x: x.lower(), df_line['asset'])) #make sure that asset column is in lowercase characters\n #reclassify assets \n mapping_dict = {\n \"cable\" : \"cable\", #underground\n \"minor_cable\" : \"cable\", \n #\"generator\" : \"generator\", #device used to convert power from one form to another\n \"line\" : \"line\", #overground\n \"minor_line\" : \"minor_line\", #overground\n #\"plant\" : \"plant\", #place where power is generated\n #\"substation\" : \"substation\"\n }\n df_line['asset'] = df_line.asset.apply(lambda x : mapping_dict[x]) #reclassification \n\n if 'voltage' in df_line.columns:\n df_line = df_line.drop(['voltage'], axis=1) \n \n #extract polygon data\n df_poly = power_polygon(osm_path) #extract required data\n df_poly['geometry'] =pygeos.buffer(df_poly.geometry,0) #avoid intersection\n \n #extract point data\n df_point = power_point(osm_path) #extract required data\n \n return pandas.concat([df_line, df_poly, df_point], ignore_index=True)", "def create_data_model(con,route_id):\n data = {}\n df1 = pd.read_sql('SELECT * FROM travel_times WHERE route_id = \"{0}\";'.format(route_id), con)\n #df1_data = df1.pivot().values\n data['distance_matrix'] = df1.pivot(index='stop1',columns='stop2',values='travel_time').values\n print('data loaded for {0}'.format(route_id))\n data['num_vehicles'] = 1\n data['depot'] = 0\n return data", "def fetch_dataloader(types, dataset_dir, params):\n\n dataloaders = {}\n samplers = {}\n\n for split in ['train', 'val', 'test']:\n if split in types:\n path = os.path.join(dataset_dir, \"{}\".format(split))\n\n # Use the train_transformer if training data, else use eval_transformer without random flip\n # take care of 'pin_memory' and 'num_workers'\n if split == 'train':\n train_set = BaseDataset(path, train_transformer)\n sampler = None\n if params.distributed:\n sampler = torch.utils.data.distributed.DistributedSampler(\n train_set)\n dataloader = DataLoader(\n train_set,\n batch_size=params.batch_size_pre_gpu,\n shuffle=(sampler is None),\n num_workers=params.num_workers,\n pin_memory=params.cuda,\n sampler=sampler)\n\n else:\n val_set = BaseDataset(path, eval_transformer)\n sampler = None\n if params.distributed:\n sampler = torch.utils.data.distributed.DistributedSampler(\n val_set)\n dataloader = DataLoader(\n val_set,\n batch_size=params.batch_size_pre_gpu,\n shuffle=False,\n pin_memory=params.cuda,\n num_workers=params.num_workers,\n sampler=sampler)\n\n dataloaders[split] = dataloader\n samplers[split] = sampler\n\n return dataloaders, samplers", "def build_dict_of_sets(data):\n\n data_sets = {}\n\n for part in data:\n data_sets[part] = set(data[part])\n\n return data_sets", "def _transform_spectra_data(spec_id_dict: dict):\n collection_run_id_dict = {}\n spectra_data = []\n i = 1\n for collection in spec_id_dict.keys():\n for run in spec_id_dict[collection].keys():\n collection_run_id = \"/\".join(filter(None, [collection, run]))\n if collection_run_id not in collection_run_id_dict.keys():\n collection_run_id_dict[collection_run_id] = i\n spectra_data_object = {\n \"id\": i,\n \"location\": collection_run_id,\n \"spectrum_id_format\": \"multiple peak list nativeID format\",\n # 'file_format': #TODO can we infer this?\n }\n spectra_data.append(spectra_data_object)\n return spectra_data, collection_run_id_dict", "def get_weight_df(self) -> pd.DataFrame:\n\n day_to_week = self.calendar_df.set_index(\"d\")[\"wm_yr_wk\"].to_dict()\n 
weight_df = self.train_df[[\"item_id\", \"store_id\"] + self.weight_columns].set_index([\"item_id\", \"store_id\"])\n weight_df = (weight_df.stack().reset_index().rename(columns = {\"level_2\": \"d\", 0: \"value\"}))\n weight_df[\"wm_yr_wk\"] = weight_df[\"d\"].map(day_to_week)\n weight_df = weight_df.merge(self.sell_prices_df, how = \"left\", on = [\"item_id\", \"store_id\", \"wm_yr_wk\"])\n weight_df[\"value\"] = weight_df[\"value\"] * weight_df[\"sell_price\"]\n weight_df = weight_df.set_index([\"item_id\", \"store_id\", \"d\"]).unstack(level = 2)[\"value\"]\n weight_df = weight_df.loc[zip(self.train_df.item_id, self.train_df.store_id), :].reset_index(drop = True)\n weight_df = pd.concat([self.train_df[self.id_columns], weight_df], axis = 1, sort = False)\n\n weights_map_lst = []\n for group_id in self.group_ids:\n if type(group_id) == str:\n group_id = [group_id]\n\n lv_weight = weight_df.groupby(group_id)[self.weight_columns].sum().sum(axis = 1)\n lv_weight = lv_weight / lv_weight.sum()\n \n if len(group_id) == 2:\n lv_weight.index = pd.Series(lv_weight.index.values).apply(lambda x: \"--\".join(x))\n\n weights_map_lst.append(lv_weight)\n\n weights_df = pd.concat(weights_map_lst) / len(self.group_ids)\n\n return weights_df", "def prepare_data_to_send(self, category, agent_id, check_data_type=True):\n\n agent_data = self.get_agent_data_by_category(category)\n point = {}\n\n if check_data_type:\n for key, value in agent_data.items():\n try:\n val = (int(value))\n point[key] = value\n except:\n pass\n\n\n else:\n point = agent_data\n\n return [\n {\n \"measurement\": \"hardware_info\",\n \"tags\": {\"agent_number\": f\"{agent_id}\", \"category\": category},\n \"fields\": point\n }\n ]", "def get_cleaned_price_data(raw_price_df):\n processed_data = defaultdict(list)\n global_map = defaultdict(list)\n for idx, data_dict in raw_price_df.iterrows():\n is_valid, res_tuple, unit_price = get_valid_unique_tuple_for_data_and_price(data_dict)\n if is_valid:\n processed_data[res_tuple].append(unit_price)\n shipment_type = str(data_dict[hist_keys.SHIPMENT_TYPE])\n shipper_id = str(data_dict[hist_keys.SHIPPER])\n global_map[(shipment_type, shipper_id)].append(unit_price)\n if shipper_id is not None:\n global_map[(shipment_type, None)].append(unit_price)\n return processed_data, global_map", "def consolidate_mel(mel,delivery=False):\n c_MEL={}\n WP=00\n \n mel['Part No.']=mel['WP Activity/ Part No.']\n mel['Part No.']=mel['Part No.'].astype(str)\n\n #mel['Quantity']=mel['Quantity'].str.replace('m','',regex=False) \n\n mel['Quantity']=mel['Quantity'].fillna(value=0).astype(str) \n mel['Quantity']=mel['Quantity'].str.replace('meters','',regex=True) \n mel['Quantity']=mel['Quantity'].str.replace('m','',regex=False) \n\n\n mel['Quantity']=mel['Quantity'].astype('float')\n if delivery:\n for i, row in mel.iterrows():\n c_MEL[(str(row['Part No.'])+row['Delivery'])]={'Quantity':mel['Quantity'][(mel['Part No.'].astype(str)==str(row['Part No.'])) & (mel['Delivery']==row['Delivery'])].sum(),\n 'Part No.':row['Part No.'],\n 'Delivery':row['Delivery'],\n 'Equipment Description':row['Equipment Description'],\n 'WP':row['WP']}\n else:\n for i, row in mel.iterrows():\n c_MEL[(str(row['Part No.']))]={'Quantity':mel['Quantity'][mel['Part No.'].astype(str)==str(row['Part No.'])].sum(),\n 'Part No.':row['Part No.'],\n 'Equipment Description':row['Equipment Description']}\n \n c_MEL=pd.DataFrame(c_MEL).T \n return c_MEL", "def data():\n df = gen_sliced_df()\n df = df[[\"x\", \"z_categ\", \"y\", \"residual\"]]\n new_df = 
df.iloc[[1, 100, 150, 200, 250, 300, 305, 400, 405, 500, 550, 609]].copy()\n return {\"df\": df, \"new_df\": new_df}", "def process_data(data):\n info = {\n 'cities': [],\n 'temperatures': [],\n 'humidities': [],\n }\n cities = data['list']\n for city in cities:\n main_data = city['main']\n info['cities'].append(city['name'])\n info['temperatures'].append(main_data['temp'])\n info['humidities'].append(main_data['humidity'])\n\n return info", "def location_specific_to_dataset(model_run):\n # for every transmission technology, we extract distance information, if it\n # is available\n data_dict = dict()\n\n data_dict[\"distance\"] = dict(\n dims=\"loc_techs_transmission\",\n data=[\n model_run.get_key(\n \"locations.{loc_from}.links.{loc_to}.techs.{tech}.distance\".format(\n **split_loc_techs_transmission(loc_tech)\n ),\n np.nan,\n )\n for loc_tech in model_run.sets[\"loc_techs_transmission\"]\n ],\n )\n # If there is no distance information stored, distance array is deleted\n if data_dict[\"distance\"][\"data\"].count(np.nan) == len(\n data_dict[\"distance\"][\"data\"]\n ):\n del data_dict[\"distance\"]\n\n data_dict[\"lookup_remotes\"] = dict(\n dims=\"loc_techs_transmission\",\n data=concat_iterable(\n [\n (k[\"loc_to\"], k[\"tech\"], k[\"loc_from\"])\n for k in [\n split_loc_techs_transmission(loc_tech)\n for loc_tech in model_run.sets[\"loc_techs_transmission\"]\n ]\n ],\n [\"::\", \":\"],\n ),\n )\n # If there are no remote locations stored, lookup_remotes array is deleted\n if data_dict[\"lookup_remotes\"][\"data\"].count(np.nan) == len(\n data_dict[\"lookup_remotes\"][\"data\"]\n ):\n del data_dict[\"lookup_remotes\"]\n\n data_dict[\"available_area\"] = dict(\n dims=\"locs\",\n data=[\n model_run.locations[loc].get(\"available_area\", np.nan)\n for loc in model_run.sets[\"locs\"]\n ],\n )\n\n # remove this dictionary element if nothing is defined in it\n if set(data_dict[\"available_area\"][\"data\"]) == {np.nan}:\n del data_dict[\"available_area\"]\n\n # Coordinates are defined per location, but may not be defined at all for\n # the model\n if \"coordinates\" in model_run.sets:\n data_dict[\"loc_coordinates\"] = dict(dims=[\"locs\", \"coordinates\"], data=[])\n for loc in model_run.sets[\"locs\"]:\n data_dict[\"loc_coordinates\"][\"data\"].append(\n [\n model_run.locations[loc].coordinates[coordinate]\n for coordinate in model_run.sets.coordinates\n ]\n )\n\n return data_dict", "def data_pandas(detections):\n return DataWrapperPandas(detections, duplicates_radius=1)", "def create_time_s(df, medidor, freq='15T'):\n dates_complete = pd.date_range('1/18/2013', '02/09/2014', freq='15T')\n # this dates take them from the file\n my_complete_series = pd.Series(dates_complete)\n frame1 = my_complete_series.to_frame()\n frame1.columns = ['key']\n merged = pd.merge(frame1, df, on='key', how='outer')\n merged = merged.sort('key')\n # fill the merged file with the number of the meter\n merged['medidor'].fillna(medidor, inplace=True)\n\n return merged", "def fetch_data(self, fields):\n n_symbols = len(self.symbol_list)\n n_iters = n_symbols // 100 + 1\n\n # data_dict keyed by symbol\n data_dict = {}\n for i in range(0, n_iters):\n start = i*100\n end = (i+1) * 100\n response = self.get_points(self.symbol_list[start:end],\n fields)['response']\n\n for symbol in response:\n if response[symbol]['meta']['status'] == 'ok':\n symbol_data = response[symbol]['results']\n data_dict[symbol] = {}\n for data_point in symbol_data:\n data_dict[symbol][data_point] = \\\n symbol_data[data_point]['data'][1]\n 
else:\n data_dict[symbol] = {field:np.nan for field in fields}\n return data_dict", "def read_delsys_csv(filename: str) -> Dict[str, Dict[str, TimeSeries]]:\n # Check the number of rows to skip\n n_rows = 0\n with open(filename, 'r') as fid:\n while True:\n s = fid.readline()\n if s.startswith('X[s]'):\n break\n else:\n n_rows += 1\n\n # Open the CSV\n df = pd.read_csv(filename, skiprows=n_rows)\n\n # Create a TimeSeries for each signal since they all have different time\n # vectors\n n_signals = int(len(df.columns) / 2)\n\n emg = {}\n acc = {}\n gyro = {}\n mag = {}\n\n for i_signal in range(n_signals):\n time = df.iloc[:, i_signal * 2].to_numpy()\n name = df.columns[i_signal * 2 + 1]\n data = df.iloc[:, i_signal * 2 + 1].to_numpy()\n\n if ': Acc' in name:\n short_name = name\n ts = TimeSeries(time=time, data={short_name: data})\n acc[short_name] = ts\n elif ': Mag' in name:\n short_name = name\n ts = TimeSeries(time=time, data={short_name: data})\n mag[short_name] = ts\n elif ': Gyro' in name:\n short_name = name\n ts = TimeSeries(time=time, data={short_name: data})\n gyro[short_name] = ts\n elif ': EMG' in name:\n short_name = name.split(':')[0]\n ts = TimeSeries(time=time, data={short_name: data})\n emg[short_name] = ts\n\n return {'emg': emg, 'acc': acc, 'gyro': gyro, 'mag': mag}", "def split(self, bins):\n collapsed = MatrixDict()\n sub_dicts = dict()\n for meta, group in bins.iteritems():\n collapsed[meta] = Counter()\n small = MatrixDict()\n sub_dicts[meta] = small\n for code in group:\n collapsed[meta].update(self[code])\n small[code] = self[code]\n return collapsed, sub_dicts", "def group_data(data):\n\n data_grouped = dict()\n\n for data_pt in data:\n resonance_id = data_pt.par['resonance_id']\n\n assignment = parse_assignment(resonance_id)\n index = int(assignment[0][0])\n\n data_grouped.setdefault((index, resonance_id), []).append(data_pt)\n\n return data_grouped", "def _prepare_distances_dict(self, unknown_area):\n\n new_d = self.joined_datasets.copy()\n new_d = new_d.append(unknown_area, ignore_index=True)\n\n try:\n new_d['px'] = new_d['geometry'].apply(lambda v: v[0].x)\n new_d['py'] = new_d['geometry'].apply(lambda v: v[0].y)\n except TypeError:\n new_d['px'] = new_d['geometry'].apply(lambda v: v.x)\n new_d['py'] = new_d['geometry'].apply(lambda v: v.y)\n\n new_dict = (new_d.groupby(self.id_col)\n .apply(lambda v:\n {'coordinates': list(map(list, zip(v['px'], v['py'], v['TOT'])))})\n .to_dict())\n return new_dict", "def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i 
= 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if i > 35000:\n # s.save()\n # reduce_data()" ]
[ "0.54995227", "0.5438723", "0.5368502", "0.5285218", "0.5209991", "0.5202449", "0.51980925", "0.5175748", "0.5165017", "0.5150522", "0.51441115", "0.5076895", "0.50656265", "0.50650203", "0.5061612", "0.5057883", "0.50543916", "0.5050856", "0.49904716", "0.4989884", "0.49895522", "0.4975154", "0.49727502", "0.49721086", "0.49554804", "0.4952854", "0.494106", "0.49159202", "0.49117824", "0.4905359", "0.49016547", "0.48969683", "0.48893842", "0.4876273", "0.48681414", "0.48659974", "0.48517102", "0.48478574", "0.48434037", "0.4831031", "0.48214617", "0.48207173", "0.48195887", "0.48184362", "0.4812085", "0.4811127", "0.48109448", "0.48048046", "0.48034492", "0.47958666", "0.47904074", "0.4790007", "0.47843713", "0.4783434", "0.47807017", "0.47785622", "0.47782564", "0.4776063", "0.47720587", "0.47712934", "0.47690356", "0.47662082", "0.4763573", "0.47536817", "0.47526443", "0.47521272", "0.47468033", "0.47438022", "0.47364417", "0.47322688", "0.47237217", "0.47223103", "0.47118953", "0.47074524", "0.47028893", "0.47013938", "0.47006705", "0.46981785", "0.46934822", "0.46847537", "0.4683167", "0.4674588", "0.4674344", "0.46736634", "0.46683455", "0.46667448", "0.46661127", "0.4663759", "0.4662087", "0.46600062", "0.46570677", "0.46479344", "0.46476597", "0.46470958", "0.4641678", "0.4639803", "0.4638428", "0.4635751", "0.4632958", "0.46329466" ]
0.806629
0
dataset_name should be 'train' or 'test'
def produce_and_cache_small_dataset_dict(dataset_name, n=500000, meter_types=const.METER_MAP.values()):
    small_dataset_cache_file = pathlib.Path(dataset_name + '_small_store_joined.h5')
    if small_dataset_cache_file.exists():
        small_dataset_dict = import_dict_from_cached(small_dataset_cache_file, meter_types)
    else:
        big_dataset_cache_file = pathlib.Path(dataset_name + '_store_joined.h5')
        big_dataset_dict = import_dict_from_cached(big_dataset_cache_file, meter_types)
        small_dataset_dict = {key: big_dataset.head(n) for key, big_dataset in big_dataset_dict.items()}
        _cache_data(small_dataset_dict, small_dataset_cache_file)
    return small_dataset_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataset(dataset_name):\n dataset_as_lower = dataset_name.lower()\n if dataset_as_lower in _datasets_from_keras.keys():\n data_details = _datasets_from_keras[dataset_as_lower]\n (x_train, y_train), (x_test, y_test) = data_details['data'].load_data()\n else:\n raise IOError(\"Dataset {0} is NOT supported\".format(dataset_name))\n\n # Performing pre-processing specifically for images datasets.\n if data_details['data type'] == 'image':\n x_train = _pre_process_images(x_train, data_details)\n x_test = _pre_process_images(x_test, data_details)\n\n return x_train, y_train, x_test, y_test", "def load_examples_data(dataset_name):\n dataset_name = dataset_name.strip().lower()\n if dataset_name.lower() not in ['pokemon', 'hanzi', 'animals', 'nsfw', 'simpsons', 'horse2zebra', 'people',\n 'autodrive', 'superresolution', 'anpr', 'beauty','antisproofing','facelandmarks','dogs-vs-cats','chinese']:\n raise ValueError('Not a valid dataset_name.')\n dataset_name = 'examples_' + dataset_name\n dirname = os.path.join(_trident_dir, dataset_name)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n is_internet_ok = is_connected()\n if dataset_name == 'examples_pokemon':\n is_download=download_file_from_google_drive('1U-xc54fX9j9BcidvRa0ow6qjssMlSF2A', dirname, 'pokemon.tar')\n tar_file_path = os.path.join(dirname, 'pokemon.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n extract_path = os.path.join(dirname, 'pokemon')\n dataset = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n print('get pokemon images :{0}'.format(len(dataset)))\n return dataset\n\n\n elif dataset_name == 'examples_hanzi':\n download_file_from_google_drive('13UEzSG0az113gpRPKPyKrIE2HDaA2P4H', dirname, 'hanzi.tar')\n tar_file_path = os.path.join(dirname, 'hanzi.tar')\n extract_path = os.path.join(dirname, 'hanzi')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, os.path.join(dirname, 'train'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset_test = load_folder_images(dataset_name, os.path.join(dirname, 'test'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset.testdata = dataset_test.traindata\n dataset.class_names['zh-cn'] = dataset.class_names['en-us']\n return dataset\n\n elif dataset_name == 'examples_animals':\n download_file_from_google_drive('19Cjq8OO6qd9k9TMZxlPjDpejDOdiHJoW', dirname, 'animals.tar')\n tar_file_path = os.path.join(dirname, 'animals.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, dirname, folder_as_label=True)\n return dataset\n elif dataset_name == 'examples_nsfw':\n tar_file_path = os.path.join(dirname, 'nsfw.tar')\n if os.path.exists(tar_file_path) and get_file_create_time(tar_file_path)<datetime.datetime(2021, 2, 20, 0, 0, 0).timestamp():\n os.remove(tar_file_path)\n if os.path.exists(os.path.join(dirname,'porn_detection_data.pkl')):\n os.remove(os.path.join(dirname,'porn_detection_data.pkl'))\n _delete_h(dirname)\n download_file_from_google_drive('1EXpV2QUrSFJ7zJn8NqtqFl1k6HvXsUzp', dirname, 'nsfw.tar')\n\n extract_path = os.path.join(dirname, 'nsfw')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n folders = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']\n data=unpickle(os.path.join(dirname,'porn_detection_data.pkl'))\n\n trainData = []\n testData = 
[]\n trainLabel = []\n testLabel = []\n for n in range(5):\n folder=folders[n]\n trainData.extend(data[folder]['train'])\n trainLabel.extend([n]*len(data[folder]['train']))\n testData.extend(data[folder]['test'])\n testLabel.extend([n] * len(data[folder]['test']))\n\n trainarray = ImageDataset(trainData,object_type=ObjectType.rgb)\n trainlabel = LabelDataset(trainLabel,object_type=ObjectType.classification_label)\n train_iter = Iterator(data=trainarray, label=trainlabel)\n\n testarray = ImageDataset(testData,object_type=ObjectType.rgb)\n testlabel = LabelDataset(testLabel,object_type=ObjectType.classification_label)\n test_iter = Iterator(data=testarray, label=testlabel)\n print('training images: {0} test images:{1}'.format(len(trainarray), len(testarray)))\n\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n dataset.binding_class_names(['drawing', 'hentai', 'neutral', 'porn', 'sexy'], 'en-us')\n dataset.binding_class_names(['绘画', '色情漫画', '中性', '色情', '性感'], 'zh-cn')\n dataset.binding_class_names(['繪畫', '色情漫畫', '中性', '色情', '性感'], 'zh-tw')\n dataset.scenario = 'train'\n return dataset\n elif dataset_name == 'examples_simpsons':\n download_file_from_google_drive('1hGNFbfBv3EZ4nx4Qod6PtSYzO8H4QIxC', dirname, 'simpsons.tar')\n tar_file_path = os.path.join(dirname, 'simpsons.tar')\n extract_path = os.path.join(dirname, 'simpsons')\n extract_archive(tar_file_path, extract_path, archive_format='tar')\n data_provider = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n data_provider.traindata.unpair = RandomNoiseDataset(shape=(100), random_mode='normal')\n print('get simpsons images :{0}'.format(len(data_provider.traindata.data.items)))\n return data_provider\n elif dataset_name == 'examples_horse2zebra':\n download_file_from_google_drive('1pqj-T90Vh4wVNBV09kYZWgVPsZUA2f7U', dirname, 'horse2zebra.tar')\n tar_file_path = os.path.join(dirname, 'horse2zebra.tar')\n extract_path = os.path.join(dirname, 'horse2zebra')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n trainA = ImageDataset(list_images(os.path.join(dirname, 'trainA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n trainB = ImageDataset(list_images(os.path.join(dirname, 'trainB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testA = ImageDataset(list_images(os.path.join(dirname, 'testA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testB = ImageDataset(list_images(os.path.join(dirname, 'testB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n train_iter = Iterator(data=trainA, unpair=trainB)\n test_iter = Iterator(data=testA, unpair=testB)\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n print('get horse2zebra images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_people':\n download_file_from_google_drive('1H7mJJfWpmXpRxurMZQqY4N_UXWLbQ2pT', dirname, 'people.tar')\n tar_file_path = os.path.join(dirname, 'people.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'imgs', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs=list(sorted(imgs))\n masks = list(sorted(masks))\n # make_dir_if_need(os.path.join(dirname, 'trimap'))\n # for i in range(len(masks)):\n # mask=mask2array(masks[i])\n # trimap=mask2trimap(mask)\n # save_mask(trimap,masks[i].replace('masks','trimap'))\n # print('trimap',len(masks))\n\n imgdata = 
ImageDataset(images=imgs, object_type=ObjectType.rgb)\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.binary_mask)\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get people images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_autodrive':\n download_file_from_google_drive('1JqPPeHqhWLqnI6bD8nuHcVx-Y56oIZMK', dirname, 'autodrive.tar')\n tar_file_path = os.path.join(dirname, 'autodrive.tar')\n extract_path = os.path.join(dirname, 'autodrive')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'images', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs = list(sorted(imgs))\n masks = list(sorted(masks))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb,symbol='image')\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.color_mask,symbol='mask')\n\n def parse_code(l):\n if len(l.strip().split(\"\\t\")) == 2:\n a, b = l.replace('\\t\\t', '\\t').strip().split(\"\\t\")\n return tuple(int(i) for i in b.split(' ')), a\n\n label_codes, label_names = zip(\n *[parse_code(l) for l in open(os.path.join(dirname, \"label_colors.txt\")).readlines()])\n for i in range(len(label_codes)):\n mskdata.palette[label_names[i]] = label_codes[i]\n\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get autodrive images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_superresolution':\n download_file_from_google_drive('1v1uoymrWI_MLSiGvSGW7tWJYSnzzXpEQ', dirname, 'superresolution.tar')\n tar_file_path = os.path.join(dirname, 'superresolution.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs.extend(glob.glob(os.path.join(dirname, '*.bmp')))\n imgs = list(sorted(imgs))\n\n print('get super resolution images :{0}'.format(len(imgs)))\n\n imgdata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='lr')\n labeldata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='hr')\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=labeldata))\n return dataset\n elif dataset_name == 'examples_beauty':\n download_file_from_google_drive('1aJhxN9IqsxuayhRTm-gmxk6PiLe5wm9X', dirname, 'beauty.tar')\n tar_file_path = os.path.join(dirname, 'beauty.tar')\n\n extract_archive(tar_file_path, dirname, archive_format='tar')\n # 讀取圖片數據\n images_dict = {}\n with open(os.path.join(dirname, 'images_dict.pkl'), 'rb') as fp:\n images_dict = pickle.load(fp)\n\n f = open(os.path.join(dirname, 'All_Ratings.txt'), encoding='utf-8-sig').readlines()\n imgs = []\n landmarks = []\n ratings = []\n for row in f:\n data = row.strip().split('\\t')\n if 'images\\\\' + data[0] in images_dict:\n img = images_dict['images\\\\' + data[0]][0]\n img = img.transpose([2, 0, 1])[::-1].transpose([1, 2, 0])\n imgs.append(img)\n landmark = images_dict['images\\\\' + data[0]][1].astype(np.float32)\n landmarks.append(landmark)\n rating = (float(data[1])) / 5.00\n ratings.append(rating)\n print('{0} faces loaded...'.format(len(imgs)))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb, symbol='faces')\n landmarkdata = LandmarkDataset(landmarks=landmarks, object_type=ObjectType.landmarks, symbol='target_landmarks')\n labeldata = LabelDataset(data=ratings,object_type=ObjectType.classification_label, symbol='target_beauty')\n 
data_provider = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=Dataset.zip(landmarkdata,labeldata)))\n return data_provider\n\n elif dataset_name == 'examples_facelandmarks':\n download_file_from_google_drive('1GtswQBAHPa_bXaB4tW2uOOQ8Lxfz2L5B', dirname, 'ibug_300W.tar')\n tar_file_path = os.path.join(dirname, 'ibug_300W.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n root_dir=os.path.join(dirname, 'ibug_300W_large_face_landmark_dataset')\n image_paths = {}\n landmarks = {}\n crops = {}\n\n for mode in ['train','test']:\n make_dir_if_need(os.path.join(dirname, 'crops',mode))\n tree = ElementTree.parse(os.path.join(root_dir, 'labels_ibug_300W_{0}.xml'.format(mode)))\n root = tree.getroot()\n image_paths[mode]=[]\n landmarks[mode] = []\n crops[mode] = []\n\n offset=5\n for j in tqdm(range(len(root[2]))):\n try:\n filename=root[2][j]\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmark=np.asarray(landmark)\n\n crop = filename[0].attrib\n for k in crop.keys():\n crop[k] = int(crop[k]) if isinstance(crop[k], str) else crop[k]\n for k in crop.keys():\n if k=='top' and int(landmark[:,1].min())<int(crop[k]):\n crop[k] = int( landmark[:,1].min())\n crop[ 'height']+=crop[k]-int(landmark[:,1].min())\n elif k=='left' and int(landmark[:,0].min())<int(crop[k]):\n crop[k] = int( landmark[:,0].min())\n crop['width']+= crop[k] - int(landmark[:, 0].min())\n elif k == 'width' and int(landmark[:, 0].max()-landmark[:, 0].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 0].max()-landmark[:, 0].min())\n elif k == 'height' and int(landmark[:, 1].max()-landmark[:, 1].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 1].max()-landmark[:, 1].min())\n\n crop['left']-=offset\n crop['top'] -= offset\n crop['width'] += 2*offset\n crop['height'] += 2*offset\n\n\n landmark[:,0]-=crop['left']\n landmark[:, 1] -= crop['top']\n\n\n if not os.path.exists(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j))):\n im=image2array(os.path.join(root_dir, filename.attrib['file']))\n if im.ndim==2:\n im=cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)\n im=im[crop['top']:min(crop['top']+crop['height'],im.shape[0]),crop['left']:min(crop['left']+crop['width'],im.shape[1]),:]\n\n if max(im.shape[:2])/max(min(im.shape[:2]),0)<=5:\n\n array2image(im).save(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n del im\n else:\n #im = image2array(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n\n if j%100==0:\n gc.collect()\n except Exception as e:\n pass\n\n print('ibug 300w train dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['train']),len(landmarks['train'])))\n print('ibug 300w test dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['test']), len(landmarks['test'])))\n imdata=ImageDataset(images=image_paths['train'],symbol='faces',object_type=ObjectType.rgb)\n landmarkdata = LandmarkDataset(landmarks=landmarks['train'], symbol='landmarks',object_type=ObjectType.landmarks)\n imtestdata = ImageDataset(images=image_paths['test'], symbol='faces',object_type=ObjectType.rgb)\n landmarktestdata = 
LandmarkDataset(landmarks=landmarks['test'], symbol='landmarks',object_type=ObjectType.landmarks)\n data_provider=DataProvider(traindata=Iterator(data=imdata,label=landmarkdata),testdata=Iterator(data=imtestdata,label=landmarktestdata))\n return data_provider\n\n elif dataset_name == 'examples_antisproofing':\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name,os.path.join(dirname, 'antisproofing'))\n return data_provider\n elif dataset_name == 'examples_anpr':\n download_file_from_google_drive('1uGBd8tXlP0TZAXNgrR6H0jl5MXj7VPbN', dirname, 'anpr.tar')\n tar_file_path = os.path.join(dirname, 'anpr.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs = list(sorted(imgs))\n\n # CCPD (Chinese City Parking Dataset, ECCV) and PDRC (license Plate Detection and Recognition Challenge)\n # https://github.com/detectRecog/CCPD\n provinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\",\n \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'O']\n ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\n def lp2char(lp):\n cols = lp.split('_')\n charstring = ''\n for i in range(len(cols)):\n if i == 0:\n charstring += provinces[int(cols[i])]\n elif i == 1:\n charstring += alphabets[int(cols[i])]\n else:\n charstring += ads[int(cols[i])]\n return charstring\n\n width = 720\n height = 1160\n for im_path in imgs:\n lbl = im_path.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\n charstring = lp2char(lbl)\n iname = im_path.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\n box = [leftUp[0], leftUp[1], rightDown[0], rightDown[1]]\n ori_w, ori_h = [float(int(el)) for el in [width, height]]\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, os.path.join(dirname, 'antisproofing'))\n return data_provider\n\n\n\n elif dataset_name == 'examples_dogs-vs-cats':\n download_file_from_google_drive('10czW0On7eIXkPP-MuQ-IRxMWdTizWjNC', dirname, 'dogs-vs-cats.tar')\n tar_file_path = os.path.join(dirname, 'dogs-vs-cats.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, dirname)\n return data_provider\n elif dataset_name == 'examples_chinese':\n to_half=ToHalfWidth()\n to_sc=ChineseConvert(convert_to='simplified')\n 
download_file_from_google_drive('1yzRzXpLuhSUxnixqCgpbdTk16ajnTEWF', dirname, 'chinese.tar')\n tar_file_path = os.path.join(dirname, 'chinese.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n\n as_train = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_training.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000' ,'|'))).splitlines()\n cityu_train =remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_training.utf8'), encoding='utf-8-sig').read().strip().replace(' ','|'))).splitlines()\n\n as_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_testing_gold.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000', '|'))).splitlines()\n cityu_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_test_gold.utf8'), encoding='utf-8-sig').read().strip().replace(' ', '|'))).splitlines()\n\n\n data = as_train + cityu_train # 把兩個語料合併\n test_data=as_test + cityu_test # 把兩個語料合併\n\n\n raw_data_train = [row.strip('\\n').strip('\\r') for row in data] # 移除分行字元\n raw_data_test = [row.strip('\\n').strip('\\r') for row in test_data] # 移除分行字元\n\n process_data_train=[]\n process_seg_label_train = []\n process_simplifided_label_train = []\n process_traditional_label_train = []\n\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n for k in tqdm(range(len(raw_data_train))):\n row=raw_data_train[k]\n if row.startswith('∥'):\n row=row[1:]\n words=row.replace('||','|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_train.append(word[i])\n #tmp_simplifided_label_train.append(to_half(to_sc(word[i])))\n #轉換為BMES\n\n if i==0 and len(word)>1: #B 是一個詞的開始\n tmp_seg_label_train.append('B')\n elif i==len(word)-1 and len(word)>=2 and tmp_seg_label_train[-1] in ['B','M']: #E 是一個詞的結束\n tmp_seg_label_train.append('E')\n elif len(word)==1 and i==0: #S 自己就是一個單詞\n tmp_seg_label_train.append('S')\n elif len(word)>=3 and tmp_seg_label_train[-1] in ['B','M']: #M 是一個詞的中間\n tmp_seg_label_train.append('M')\n\n if len(tmp_seg_label_train)>0 and tmp_seg_label_train[-1] in ['E','S']:\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)) and k+1<len(words):\n if word in [ '。','﹖']:\n pass\n\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_train.append(' ')\n tmp_seg_label_train.append('S')\n\n if (k+1<len(raw_data_train) and not raw_data_train[k+1].startswith( '」')) and words[-1] in [ '。','﹖']:\n #process_traditional_label_train.append(tmp_data_train)\n\n tmp_data_train=to_half(''.join(tmp_data_train))\n tmp_seg_label_train = ''.join(tmp_seg_label_train)\n # if len(tmp_data_train)!=len(tmp_seg_label_train):\n # print('')\n tmp_simplifided_label_train =to_sc(tmp_data_train)\n\n process_data_train.append(tmp_data_train)\n process_seg_label_train.append(tmp_seg_label_train)\n process_simplifided_label_train.append(tmp_simplifided_label_train)\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n # else:\n # tmp_data_train.append('\\n')\n # tmp_simplifided_label_train.append('\\n')\n # tmp_seg_label_train.append('\\n')\n corpus=process_data_train\n seg_corpus=process_seg_label_train\n simplifided_corpus =process_simplifided_label_train\n\n process_data_test = []\n process_seg_label_test = []\n process_simplifided_label_test = []\n process_traditional_label_test = []\n print('generate test labels')\n tmp_data_test = []\n 
tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n for k in tqdm(range(len(raw_data_test))):\n row=raw_data_test[k]\n if row.startswith('∥'):\n row=row[1:]\n words = row.replace('||', '|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_test.append(word[i])\n # tmp_simplifided_label_test.append(to_half(to_sc(word[i])))\n # 轉換為BMES\n\n if i == 0 and len(word) > 1: # B 是一個詞的開始\n tmp_seg_label_test.append('B')\n elif i == len(word) - 1 and len(word) >= 2 and tmp_seg_label_test[-1] in ['B', 'M']: # E 是一個詞的結束\n tmp_seg_label_test.append('E')\n elif len(word) == 1 and i == 0: # S 自己就是一個單詞\n tmp_seg_label_test.append('S')\n elif len(word) >= 3 and tmp_seg_label_test[-1] in ['B', 'M']: # M 是一個詞的中間\n tmp_seg_label_test.append('M')\n\n if len(tmp_seg_label_test) > 0 and tmp_seg_label_test[-1] in ['E', 'S'] and k+1<len(words):\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)):\n if word in ['。', '﹖']:\n pass\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_test.append(' ')\n tmp_seg_label_test.append('S')\n\n if (k + 1 < len(raw_data_test) and not raw_data_test[k + 1].startswith('」')) and words[-1] in ['。', '﹖']:\n # process_traditional_label_test.append(tmp_data_test)\n\n tmp_data_test = to_half(''.join(tmp_data_test))\n tmp_seg_label_test = ''.join(tmp_seg_label_test)\n # if len(tmp_data_test)!=len(tmp_seg_label_test):\n # print('')\n tmp_simplifided_label_test = to_sc(tmp_data_test)\n\n process_data_test.append(tmp_data_test)\n process_seg_label_test.append(tmp_seg_label_test)\n process_simplifided_label_test.append(tmp_simplifided_label_test)\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n # else:\n # tmp_data_test.append('\\n')\n # tmp_simplifided_label_test.append('\\n')\n # tmp_seg_label_test.append('\\n')\n test_corpus = process_data_test\n test_seg_corpus = process_seg_label_test\n test_simplifided_corpus = process_simplifided_label_test\n\n\n data=TextSequenceDataset(corpus=corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_label = TextSequenceDataset(corpus=seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_label = TextSequenceDataset(corpus=simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_label = TextSequenceDataset(corpus= copy.deepcopy(corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n data_test=TextSequenceDataset(corpus=test_corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_test_label = TextSequenceDataset(corpus=test_seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_test_label = TextSequenceDataset(corpus=test_simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_test_label = TextSequenceDataset(corpus= copy.deepcopy(test_corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n\n chars = list(sorted(set(list( ''.join(corpus) 
+bpmf_phonetic+'\\n\\r\\t∥'+ ''.join(simplifided_corpus)+''.join(test_data)))))\n chars.insert(0, '[CLS]')\n chars.insert(1, '[SEP]')\n chars.insert(2, '[UNK]')\n chars.insert(3, '[PAD]')\n chars.insert(4, '[MASK]')\n\n data.vocabs =data_test.vocabs=simplifided_label.vocabs=simplifided_test_label.vocabs = chars\n data.text2index=data_test.text2index =simplifided_label.text2index=simplifided_test_label.text2index = dict((c, i) for i, c in enumerate(chars))\n data.index2text =data_test.index2text =simplifided_label.index2text=simplifided_test_label.index2text= dict((i, c) for i, c in enumerate(chars))\n traditional_label = copy.deepcopy(data)\n traditional_test_label = copy.deepcopy(data_test)\n traditional_label.object_type =traditional_test_label.object_type = ObjectType.sequence_label\n traditional_label.symbol =traditional_test_label.symbol = 'traditional_label'\n\n mask_label = copy.deepcopy(data)\n mask_test_label = copy.deepcopy(data_test)\n #mask_label.object_type =mask_test_label.object_type= ObjectType.corpus\n mask_label.symbol = mask_test_label.symbol = 'mask_label'\n\n\n\n nextword=copy.deepcopy(data)\n nextword_test = copy.deepcopy(data_test)\n nextword.object_type=nextword_test.object_type=ObjectType.sequence_label\n nextword.symbol=nextword_test.symbol='nextword_label'\n nextword.sequence_offset=nextword_test.sequence_offset=1\n\n label=ZipDataset(seg_label,nextword,simplifided_label,traditional_label,mask_label)\n label_test = ZipDataset(seg_test_label, nextword_test, simplifided_test_label, traditional_test_label, mask_test_label)\n provider=TextSequenceDataProvider(\n traindata=Iterator(data=data,label=label),\n testdata=Iterator(data=data_test,label=label_test))\n return provider\n #,sample_filter=lambda x:x[0][-1]==3\n else:\n return None", "def load_dataset(name):\n dataset, info = tfds.load(name=name,\n with_info=True,\n data_dir='data/external')\n train_dataset = dataset['train']\n train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE,\n reshuffle_each_iteration=False)\n\n return train_dataset", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. 
Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n\n elif dataset_name == \"Wine Dataset\":\n data = datasets.load_wine()\n\n elif dataset_name == \"MNIST\":\n data = datasets.load_digits()\n\n #elif dataset_name == \"Boston Housing Price\":\n # data = datasets.load_boston()\n\n X = data.data\n y = data.target\n\n return X, y", "def get_dataset(dataset_name, split_name, dataset_dir):\n if dataset_name not in _DATASETS_INFORMATION:\n raise ValueError('The specified dataset is not supported yet.')\n\n splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes\n\n if split_name not in splits_to_sizes:\n raise ValueError('data split name %s not recognized' % split_name)\n\n # Prepare the variables for different datasets.\n num_classes = _DATASETS_INFORMATION[dataset_name].num_classes\n ignore_label = _DATASETS_INFORMATION[dataset_name].ignore_label\n\n file_pattern = _FILE_PATTERN\n file_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\n # Specify how the TF-Examples are decoded.\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/filename': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature(\n (), tf.string, default_value='jpg'),\n 'image/height': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/width': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/segmentation/class/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n 'image/lastmask/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/lastmask/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n 'image/firstimage/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/firstimage/format': tf.FixedLenFeature(\n (), tf.string, default_value='jpg'),\n 'image/firstmask/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/firstmask/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n }\n\n items_to_handlers = {\n 'image': tfexample_decoder.Image(\n image_key='image/encoded',\n format_key='image/format',\n channels=3),\n 'image_name': tfexample_decoder.Tensor('image/filename'),\n 'height': tfexample_decoder.Tensor('image/height'),\n 'width': tfexample_decoder.Tensor('image/width'),\n 'labels_class': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded',\n format_key='image/segmentation/class/format',\n channels=1),\n 'last_mask': tfexample_decoder.Image(\n image_key='image/lastmask/encoded',\n format_key='image/lastmask/format',\n channels=1),\n 'first_image': tfexample_decoder.Image(\n image_key='image/firstimage/encoded',\n format_key='image/firstimage/format',\n channels=3),\n 'first_mask': tfexample_decoder.Image(\n image_key='image/firstmask/encoded',\n 
format_key='image/firstmask/format',\n channels=1),\n\n }\n\n decoder = tfexample_decoder.TFExampleDecoder(\n keys_to_features, items_to_handlers)\n\n return dataset.Dataset(\n data_sources=file_pattern,\n reader=tf.TFRecordReader,\n decoder=decoder,\n num_samples=splits_to_sizes[split_name],\n items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n ignore_label=ignore_label,\n num_classes=num_classes,\n name=dataset_name,\n multi_label=True)", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n else:\n data = datasets.load_wine()\n\n X = data.data\n y = data.target\n return X, y", "def test_dataset_from_file(train_dataset):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. Proin sed\"\n assert train_dataset[0][0] == dummy\n assert train_dataset[0][1] == '6'", "def get_dataset(name, split, data_dir=\"~/tensorflow_datasets\"):\n\n data_dir = os.path.join(os.getcwd(),'data/VOC')\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n assert split in [\"train\", \"train+validation\", \"validation\", \"test\"]\n\n dataset, info = tfds.load(name, split=split, data_dir=data_dir, with_info=True)\n return dataset, info", "def make_dataset(dataset_name):\n return {\n\n 'duc': DUCDataset(),\n\n 'icsi-asr': ICSIASRDataset(),\n 'icsi-ht': ICSIHumanTranscriptDataset(),\n\n 'inspec-train': InspectTrainingDataset(),\n 'inspec-val': InspectValidationDataset(),\n 'inspec-test': InspectTestDataset(),\n\n 'nus': NUSDataset()\n\n }[dataset_name]", "def get_dataset(name, split, data_dir=\"~/tensorflow_datasets\"):\n assert split in [\"train\", \"train+validation\", \"validation\", \"test\"]\n dataset, info = tfds.load(name, split=split, data_dir=data_dir, with_info=True)\n return dataset, info", "def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), axis=1)\n\n scaler = StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset", "def init_dataset(validation_dataset_name):\n transform = transforms.Compose([transforms.ToPILImage(),transforms.ToTensor(),transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n \n if validation_dataset_name == 'datasetRAP':\n # validation = 8317 images = 166 batches of 50 images + 1 batch of 17 images\n dataset_valid = loader_rapdataset_yiqiang.RAPDataset(0,False,'/storage/Datasets/Rap-PedestrianAttributeRecognition/',transform)\n labels = loader_rapdataset_yiqiang.ATTRIBUTES\n datset_attr_nbr = 92\n elif validation_dataset_name == 'datasetPETA':\n dataset_valid = loader_peta_dataset.PETADataset(False, '/storage/Datasets/PETA-PEdesTrianAttribute', transform)\n labels = loader_peta_dataset.ATTRIBUTES\n datset_attr_nbr = 104\n elif validation_dataset_name == 'datasetRAPPETA':\n dataset_valid = loader_rap_plus_peta_dataset.RAPPlusPETADataset(False, 
'/storage/Datasets/Rap-PedestrianAttributeRecognition/', '/storage/Datasets/PETA-PEdesTrianAttribute', transform)\n labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n datset_attr_nbr = 49\n\n print (\"Dataset valid size :\", dataset_valid.__len__())\n print (\"Dataset Attributes number :\", datset_attr_nbr)\n assert (len(labels) == datset_attr_nbr)\n\n dataloader_valid = DataLoader(dataset_valid, batch_size=Param_Batchsize, shuffle=True, num_workers=Param_Nb_Workers)\n\n return dataloader_valid, dataset_valid", "def prepare_train_dataset(name, reso, batch_size=32):\r\n transform = transforms.Compose([\r\n transforms.RandomResizedCrop(size=reso, interpolation=3),\r\n transforms.ColorJitter(brightness=1.5, saturation=1.5, hue=0.2),\r\n transforms.RandomVerticalFlip(),\r\n transforms.ToTensor()\r\n ])\r\n\r\n path = config.datasets[name]\r\n\r\n if name == 'coco':\r\n img_datasets = CocoDataset(root=path['train_imgs'], annFile=path['train_anno'], transform=transform)\r\n dataloder = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=CocoDataset.collate_fn)\r\n elif name == 'voc':\r\n img_datasets = VocDataset(train_list=path['train_imgs'], transform=transform)\r\n dataloder = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=VocDataset.collate_fn)\r\n\r\n return img_datasets, dataloder", "def train_test_set_split(dataset, dataset_name, test_size=0.1):\n train_indices_path = './' + dataset_name + '_train_indices(' + str(test_size) + ').txt'\n test_indices_path = './' + dataset_name + '_test_indices(' + str(test_size) + ').txt'\n try:\n train_indices = []\n test_indices = []\n file = open(train_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n train_indices.append(int(line[:-1]))\n file.close()\n file = open(test_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n test_indices.append(int(line[:-1]))\n file.close()\n train_labels = [dataset.targets[i] for i in train_indices]\n except FileNotFoundError:\n indices = np.arange(len(dataset))\n labels = np.array(dataset.targets)\n train_indices, test_indices, train_labels, _ = train_test_split(\n indices, labels, test_size=test_size, stratify=labels\n )\n file = open(train_indices_path, 'wt', encoding='utf-8')\n for i in train_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n file = open(test_indices_path, 'wt', encoding='utf-8')\n for i in test_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n\n train_set = torch.utils.data.Subset(dataset, indices=train_indices)\n test_set = torch.utils.data.Subset(dataset, indices=test_indices)\n return train_set, test_set, train_labels", "def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')", "def test_dataset_autogen(autogen_dataset):\n train_dummy = \"eget, venenatis a, magna. Lorem ipsum dolor sit amet, consectetuer\"\n val_dummy = \"leo. Vivamus nibh dolor, nonummy ac, feugiat non, lobortis quis,\"\n test_dummy = \"turpis egestas. Aliquam fringilla cursus purus. 
Nullam scelerisque neque sed\"\n\n assert autogen_dataset.train[0][0] == train_dummy\n assert autogen_dataset.train[0][1] == '8'\n assert len(autogen_dataset.train) == 64\n\n assert autogen_dataset.val[0][0] == val_dummy\n assert autogen_dataset.val[0][1] == '1'\n assert len(autogen_dataset.val) == 16\n\n assert autogen_dataset.test[0][0] == test_dummy\n assert autogen_dataset.test[0][1] == '6'\n assert len(autogen_dataset.test) == 20", "def create_model_and_data(dataset_name: str, use_synthetic_data: bool) ->...:\n # This `train_batch_size` is only used in training clients, not validation and\n # test clients, which are the ones we used to evaluation the personalization\n # performance. For validation and test clients, batching is applied after\n # splitting their local data into a personalization set and an eval set (i.e.,\n # inside `knn_per_avg_clients` above).\n unused_batch_size = 20\n if dataset_name == 'emnist':\n return emnist.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'stackoverflow':\n return stackoverflow.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'landmark':\n return landmark.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'ted_multi':\n return ted_multi.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n raise ValueError(f'Accepted dataset names: {constants.DATASET_NAMES}, but '\n f'found {dataset_name}. Please provide a valid name.')", "def load_dataset(name, version):\n dataset_dir = os.path.join(DATA_DIR, name)\n dataset_ver_dir = os.path.join(dataset_dir, version)\n\n if not os.path.isdir(dataset_dir):\n raise FileNotFoundError(\"Dataset dir not found\")\n if not os.path.isdir(dataset_ver_dir):\n raise FileNotFoundError(\"Dataset version dir not found\")\n\n train_data = load_kg_file(os.path.join(dataset_ver_dir, \"train.txt.gz\"))\n valid_data = load_kg_file(os.path.join(dataset_ver_dir, \"valid.txt.gz\"))\n test_data = load_kg_file(os.path.join(dataset_ver_dir, \"test.txt.gz\"))\n\n dataset = KgDataset()\n dataset.load_triples(train_data, tag=\"train\")\n dataset.load_triples(valid_data, tag=\"valid\")\n dataset.load_triples(test_data, tag=\"test\")\n return dataset", "def get_video10_dataset(dataset_name, split_name, dataset_dir):\n if dataset_name not in _DATASETS_INFORMATION:\n raise ValueError('The specified dataset is not supported yet.')\n\n splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes\n\n if split_name not in splits_to_sizes:\n raise ValueError('data split name %s not recognized' % split_name)\n\n # Prepare the variables for different datasets.\n num_classes = _DATASETS_INFORMATION[dataset_name].num_classes\n ignore_label = _DATASETS_INFORMATION[dataset_name].ignore_label\n\n file_pattern = _FILE_PATTERN\n file_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\n # Specify how the TF-Examples are decoded.\n keys_to_features = {\n 'image/encoded0': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/encoded1': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/encoded2': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/encoded3': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/encoded4': 
tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/encoded5': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/encoded6': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/encoded7': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/encoded8': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/encoded9': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/filename': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature(\n (), tf.string, default_value='jpg'),\n 'image/height': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/width': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/segmentation/class/encoded0': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/encoded1': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/encoded2': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/encoded3': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/encoded4': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/encoded5': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/encoded6': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/encoded7': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/encoded8': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/encoded9': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n 'image/lastmask/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/lastmask/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n 'image/firstimage/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/firstimage/format': tf.FixedLenFeature(\n (), tf.string, default_value='jpg'),\n 'image/firstmask/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/firstmask/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n }\n\n items_to_handlers = {\n 'image0': tfexample_decoder.Image(\n image_key='image/encoded0',\n format_key='image/format',\n channels=3),\n 'image1': tfexample_decoder.Image(\n image_key='image/encoded1',\n format_key='image/format',\n channels=3),\n 'image2': tfexample_decoder.Image(\n image_key='image/encoded2',\n format_key='image/format',\n channels=3),\n 'image3': tfexample_decoder.Image(\n image_key='image/encoded3',\n format_key='image/format',\n channels=3),\n 'image4': tfexample_decoder.Image(\n image_key='image/encoded4',\n format_key='image/format',\n channels=3),\n 'image5': tfexample_decoder.Image(\n image_key='image/encoded5',\n format_key='image/format',\n channels=3),\n 'image6': tfexample_decoder.Image(\n image_key='image/encoded6',\n format_key='image/format',\n channels=3),\n 'image7': tfexample_decoder.Image(\n image_key='image/encoded7',\n format_key='image/format',\n channels=3),\n 'image8': tfexample_decoder.Image(\n image_key='image/encoded8',\n format_key='image/format',\n channels=3),\n 'image9': tfexample_decoder.Image(\n image_key='image/encoded9',\n format_key='image/format',\n channels=3),\n 'image_name': tfexample_decoder.Tensor('image/filename'),\n 'height': tfexample_decoder.Tensor('image/height'),\n 
'width': tfexample_decoder.Tensor('image/width'),\n 'labels_class0': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded0',\n format_key='image/segmentation/class/format',\n channels=1),\n 'labels_class1': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded1',\n format_key='image/segmentation/class/format',\n channels=1),\n 'labels_class2': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded2',\n format_key='image/segmentation/class/format',\n channels=1),\n 'labels_class3': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded3',\n format_key='image/segmentation/class/format',\n channels=1),\n 'labels_class4': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded4',\n format_key='image/segmentation/class/format',\n channels=1),\n 'labels_class5': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded5',\n format_key='image/segmentation/class/format',\n channels=1),\n 'labels_class6': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded6',\n format_key='image/segmentation/class/format',\n channels=1),\n 'labels_class7': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded7',\n format_key='image/segmentation/class/format',\n channels=1),\n 'labels_class8': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded8',\n format_key='image/segmentation/class/format',\n channels=1),\n 'labels_class9': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded9',\n format_key='image/segmentation/class/format',\n channels=1),\n 'last_mask': tfexample_decoder.Image(\n image_key='image/lastmask/encoded',\n format_key='image/lastmask/format',\n channels=1),\n 'first_image': tfexample_decoder.Image(\n image_key='image/firstimage/encoded',\n format_key='image/firstimage/format',\n channels=3),\n 'first_mask': tfexample_decoder.Image(\n image_key='image/firstmask/encoded',\n format_key='image/firstmask/format',\n channels=1),\n\n }\n\n decoder = tfexample_decoder.TFExampleDecoder(\n keys_to_features, items_to_handlers)\n\n return dataset.Dataset(\n data_sources=file_pattern,\n reader=tf.TFRecordReader,\n decoder=decoder,\n num_samples=splits_to_sizes[split_name],\n items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n ignore_label=ignore_label,\n num_classes=num_classes,\n name=dataset_name,\n multi_label=True)", "def get_datasets(data):\n train_dataset, test_dataset = None, None\n data_dir = '../data'\n\n if data == 'fmnist':\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.2860], std=[0.3530])])\n train_dataset = datasets.FashionMNIST(data_dir, train=True, download=True, transform=transform)\n test_dataset = datasets.FashionMNIST(data_dir, train=False, download=True, transform=transform)\n \n elif data == 'fedemnist':\n train_dir = '../data/Fed_EMNIST/fed_emnist_all_trainset.pt'\n test_dir = '../data/Fed_EMNIST/fed_emnist_all_valset.pt'\n train_dataset = torch.load(train_dir)\n test_dataset = torch.load(test_dir) \n \n elif data == 'cifar10':\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n train_dataset = datasets.CIFAR10(data_dir, train=True, download=True, transform=transform_train)\n test_dataset = datasets.CIFAR10(data_dir, train=False, 
download=True, transform=transform_test)\n train_dataset.targets, test_dataset.targets = torch.LongTensor(train_dataset.targets), torch.LongTensor(test_dataset.targets) \n \n return train_dataset, test_dataset", "def get_dataset(\n dataset_name: str = \"CIFAR100\",\n transform_train: str = \"simple_augment_train_cifar100\",\n transform_test: str = \"simple_augment_test_cifar100\",\n transform_train_params: Dict[str, int] = None,\n transform_test_params: Dict[str, int] = None,\n) -> Tuple[VisionDataset, VisionDataset]:\n if not transform_train_params:\n transform_train_params = dict()\n\n # preprocessing policies\n transform_train = getattr(\n __import__(\"src.augmentation.policies\", fromlist=[\"\"]),\n transform_train,\n )(**transform_train_params)\n transform_test = getattr(\n __import__(\"src.augmentation.policies\", fromlist=[\"\"]),\n transform_test,\n )(**transform_test_params)\n\n # pytorch dataset\n \n # Dataset = getattr(__import__(\"torchvision.datasets\", fromlist=[\"\"]), dataset_name)\n # trainset = Dataset(\n # root=\"save/data\", train=True, download=True, transform=transform_train\n # )\n # testset = Dataset(\n # root=\"save/data\", train=False, download=False, transform=transform_test\n # )\n\n # return trainset, testset\n \n # Densedepth dataset\n from src.densedepth_data import getTrainingTestingData\n train_loader, test_loader = getTrainingTestingData(10,40)\n\n return train_loader, test_loader", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n 
)\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def dataset(name):\n t = \"unknown\"\n if name ==\"boston\":\n # regression (506x13feat)\n from sklearn.datasets import load_boston\n X, y = load_boston(return_X_y=True)\n t = \"R\"\n #X,y = shap.datasets.boston()\n #return X,y\n elif name == \"iris\":\n # classification (150x4featx3classes)\n from sklearn.datasets import load_iris\n data = load_iris()\n X = data.data\n y = data.target\n t = \"C\"\n elif name == \"diabetes\":\n # regression (442x10feat)\n from sklearn.datasets import load_diabetes\n X, y = load_diabetes(return_X_y=True)\n t = \"R\"\n elif name == \"digits\":\n # classification (1797x64featx10classes)\n from sklearn.datasets import load_digits\n X, y = load_digits(return_X_y=True)\n t = \"C\"\n elif name == \"wine\":\n # classification (178x13featuresx3classes)\n from sklearn.datasets import load_wine\n X, y = load_wine(return_X_y=True)\n t = \"C\"\n elif name == \"breast_cancer\":\n # classification (569x30featx2classes)\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n t = \"C\"\n elif name ==\"nhanesi\":\n X,y = shap.datasets.nhanesi()\n t = \"R\"\n elif name == \"segments\":\n X,y = make_led()\n t = \"C\"\n elif name == \"segments_sampled\":\n X,y = make_led_sample()\n t = \"C\"\n elif name == \"friedman1\":\n from sklearn.datasets import make_friedman1\n X,y= make_friedman1(n_samples=500, random_state=0)\n print('Done')\n X = pd.DataFrame(X, columns=list(range(X.shape[1])))\n t = 'R'\n elif name == \"friedman2\":\n from sklearn.datasets import make_friedman2\n X,y= make_friedman2(random_state=0)\n t = 'R'\n elif name == 'linear':\n X, y, t = draw_linear_function()\n elif name == \"linear2\":\n importlib.reload(lreg)\n X,y,t = lreg.lf_dataset(nsamples=5000, with_vimp=False)\n elif name == 'friendman3':\n X, y, t = friedman_modified()\n else:\n raise ValueError(\"dataset `{}` not implemented\".format(name))\n return X,y,t", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def train(self, dataset) -> None:\n raise NotImplementedError()", "def get_dataset_name():\n return os.getenv(\"AICROWD_DATASET_NAME\", \"cars3d\")", "def get_dataset(config, data_rng):\n _, train_ds, test_ds = create_datasets(config, data_rng)\n\n if config.dataset == 'mnist':\n shape = (28, 28, 
1)\n n_classes = 256\n elif config.dataset == 'binarized_mnist':\n shape = (28, 28, 1)\n n_classes = 2\n elif config.dataset == 'cifar10':\n shape = (32, 32, 3)\n n_classes = 256\n else:\n raise ValueError\n\n return train_ds, test_ds, shape, n_classes", "def prepare_data(dataset, train_ratio=0.8, input_dim=None, seed=10):\n # Retrieve main path of project\n dirname = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n # Download and store dataset at chosen location\n if dataset == 'Cora' or dataset == 'PubMed' or dataset == 'Citeseer':\n path = os.path.join(dirname, 'data')\n data = Planetoid(path, name=dataset, split='full')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n # data.train_mask, data.val_mask, data.test_mask = split_function(data.y.numpy())\n # data = Planetoid(path, name=dataset, split='public', transform=T.NormalizeFeatures(), num_train_per_class=20, num_val=500, num_test=1000)\n\n elif dataset == 'Amazon':\n path = os.path.join(dirname, 'data', 'Amazon')\n data = Amazon(path, 'photo')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y.numpy(), seed=seed)\n # Amazon: 4896 train, 1224 val, 1530 test\n \n elif dataset in ['syn1', 'syn2', 'syn4', 'syn5']: \n data = synthetic_data(\n dataset, dirname, train_ratio, input_dim)\n \n elif dataset == 'syn6':\n data = gc_data(dataset, dirname, train_ratio)\n\n elif dataset == 'Mutagenicity':\n data = gc_data(dataset, dirname, train_ratio)\n\n return data", "def load_dataset(dataset):\n print('Start loading dataset - ' + dataset + '...\\n')\n # split into training and test sets\n (ds_train, ds_test), ds_info = tfds.load(dataset, split=['train','test'], with_info=True, shuffle_files=True)\n\n print('Dataset - ' + dataset + ' loaded into train and test splits successfully.\\n')\n\n print(\"The list of all available labels for this dataset:\")\n print(list(ds_info.features.keys())) # extract available labels from ds_info \n print()\n\n print(\"The input shape of the provided image in the dataset:\")\n print(ds_info.features['image'].shape) # extract image shape from ds_info\n print()\n\n # print the size of training and test sets to console\n print(\"The number of images in the training set: \" + str(ds_info.splits['train'].num_examples))\n print(\"The number of images in the test set: \" + str(ds_info.splits['test'].num_examples))\n print()\n\n return ds_train, ds_test, ds_info", "def _validate_dataset_name(self, dataset_name: Optional[str]) -> str:\n if dataset_name is None:\n if self.num_datasets > 1:\n raise ValueError(\"`dataset_name` is required if there are \"\n \"more than one datasets.\")\n dataset_name = next(iter(self._datasets))\n if dataset_name not in self._datasets:\n raise ValueError(\"Dataset not found: \", dataset_name)\n return dataset_name", "def load_processed_dataset(name):\n assert name in VALID_NAMES, 'Invalid data set requested. 
Please make sure name is one of ' + ', '.join(VALID_NAMES) + '.'\n path = os.path.join('downloads', name)\n path_processed = os.path.join(path, 'processed')\n\n if name == 'iris':\n return pd.read_csv(os.path.join(path_processed, 'iris.csv'))\n\n elif name == 'wine':\n return pd.read_csv(os.path.join(path_processed, 'wine.csv'))\n\n elif name == 'titanic':\n return pd.read_csv(os.path.join(path_processed, 'titanic.csv'))\n\n elif name == 'lanl':\n with open(os.path.join(path_processed, 'train_data.pkl'), 'rb') as f:\n x = pkl.load(f)\n with open(os.path.join(path_processed, 'train_targets.pkl'), 'rb') as f:\n y = pkl.load(f)\n return x, y\n\n elif name == 'MNIST' or name == 'FashionMNIST':\n training = torch.load(os.path.join(path_processed, 'training.pt'))\n test = torch.load(os.path.join(path_processed, 'test.pt'))\n return training, test", "def switch_to_dataset(self, dataset_name: Optional[str] = None):\n self._current_dataset_name = self._validate_dataset_name(dataset_name)", "def load_data_tf_datasets(dataset_name, target_num_train_per_class,\n target_num_val, seed):\n logging.info('Loading and preprocessing data from tensorflow datasets...')\n # Load train data.\n ds = tfds.load(dataset_name, split=tfds.Split.TRAIN, batch_size=-1)\n ds = tfds.as_numpy(ds)\n train_inputs, train_labels = ds['image'], ds['label']\n # Load test data.\n ds = tfds.load(dataset_name, split=tfds.Split.TEST, batch_size=-1)\n ds = tfds.as_numpy(ds)\n test_inputs, test_labels = ds['image'], ds['label']\n\n # Remove extra dimensions of size 1.\n train_labels = np.squeeze(train_labels)\n test_labels = np.squeeze(test_labels)\n\n logging.info('Splitting data...')\n data = split_train_val_unlabeled(train_inputs, train_labels,\n target_num_train_per_class, target_num_val,\n seed)\n train_inputs = data[0]\n train_labels = data[1]\n val_inputs = data[2]\n val_labels = data[3]\n unlabeled_inputs = data[4]\n unlabeled_labels = data[5]\n\n logging.info('Converting data to Dataset format...')\n data = Dataset.build_from_splits(\n name=dataset_name,\n inputs_train=train_inputs,\n labels_train=train_labels,\n inputs_val=val_inputs,\n labels_val=val_labels,\n inputs_test=test_inputs,\n labels_test=test_labels,\n inputs_unlabeled=unlabeled_inputs,\n labels_unlabeled=unlabeled_labels,\n feature_preproc_fn=convert_image)\n return data", "def load_data_realistic_ssl(dataset_name, data_path, label_map_path):\n logging.info('Loading data from pickle at %s.', data_path)\n train_set, validation_set, test_set = pickle.load(open(data_path, 'rb'))\n train_inputs = train_set['images']\n train_labels = train_set['labels']\n val_inputs = validation_set['images']\n val_labels = validation_set['labels']\n test_inputs = test_set['images']\n test_labels = test_set['labels']\n # Load label map that specifies which trainining labeles are available.\n train_indices = json.load(open(label_map_path, 'r'))\n train_indices = [\n int(key.encode('ascii', 'ignore')) for key in train_indices['values']\n ]\n train_indices = np.asarray(train_indices)\n\n # Select the loaded train indices, and make the rest unlabeled.\n unlabeled_mask = np.ones((train_inputs.shape[0],), dtype=np.bool)\n unlabeled_mask[train_indices] = False\n unlabeled_inputs = train_inputs[unlabeled_mask]\n unlabeled_labels = train_labels[unlabeled_mask]\n train_inputs = train_inputs[train_indices]\n train_labels = train_labels[train_indices]\n\n # Select a feature preprocessing function, depending on the dataset.\n feature_preproc_fn = ((lambda image: image)\n if dataset_name == 
'cifar10' else convert_image)\n\n data = Dataset.build_from_splits(\n name=dataset_name,\n inputs_train=train_inputs,\n labels_train=train_labels,\n inputs_val=val_inputs,\n labels_val=val_labels,\n inputs_test=test_inputs,\n labels_test=test_labels,\n inputs_unlabeled=unlabeled_inputs,\n labels_unlabeled=unlabeled_labels,\n feature_preproc_fn=feature_preproc_fn)\n return data", "def test_get_dataset_path(self) -> None:\n framework = \"tensorflow\"\n domain = \"image_recognition\"\n result = get_dataset_path(framework, domain)\n self.assertEqual(result, \"examples/test/dataset/imagenet\")", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def dataset(options):\n pass", "def get_split(split_name, dataset_dir, seed, batch_size, file_pattern=None, reader=None):\n\n train_questions = pd.read_json(os.path.join(dataset_dir, '../raw-data/VQA_Dataset/train_questions_annotations.json'))\n #os.path.join(dataset_dir, '../raw-data/VQA_Dataset/')\n\n #Questions\n questions = list(train_questions.iloc[0])\n #Answers\n answers = list(train_questions.iloc[2])\n #Images\n images = np.array(train_questions.iloc[1])\n\n #transform answers into values of dictionary\n for i in range(len(answers)):\n answers[i] = labels_dict[answers[i]]\n \n\n \n dataset = CustomDataset(os.path.join(dataset_dir, '../raw-data/VQA_Dataset'), 'training', train_questions=train_questions) #preprocessing_function=preprocess_input\n dataset_valid = CustomDataset(os.path.join(dataset_dir, '../raw-data/VQA_Dataset'), 'validation', train_questions=train_questions) #preprocessing_function=preprocess_input\n\n \n\n train_dataset = tf.data.Dataset.from_generator(lambda: dataset,\n output_types=((tf.int32, tf.float32), tf.int32),\n output_shapes=(([max_questions_length], [int(img_h/2), int(img_w/2), 3]), [num_classes]))\n\n\n\n validation_dataset = tf.data.Dataset.from_generator(lambda: dataset_valid,\n output_types=(( tf.int32, tf.float32), tf.int32),\n output_shapes=(([max_questions_length], [int(img_h/2), int(img_w/2), 3]), [num_classes]))\n\n #train_dataset = train_dataset.shuffle(2048, seed = seed)\n\n train_dataset = train_dataset.prefetch(buffer_size = tf.data.experimental.AUTOTUNE)\n\n #validation_dataset = validation_dataset.shuffle(2048, seed = seed)\n\n validation_dataset = validation_dataset.prefetch(buffer_size = tf.data.experimental.AUTOTUNE)\n\n train_dataset = train_dataset.batch(batch_size) \n\n train_dataset = train_dataset.repeat() \n \n validation_dataset = validation_dataset.batch(batch_size)\n \n validation_dataset = validation_dataset.repeat()\n\n\n return _NUM_CLASSES, SPLITS_TO_SIZES['train'] ,SPLITS_TO_SIZES['validation'], train_dataset, validation_dataset", "def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n 
training_set = None\n validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n 
training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set", "def test_dataset_autogen_with_test(autogen_dataset_with_test):\n train_dummy = \"Etiam ligula tortor, dictum eu, placerat eget, venenatis a, magna.\"\n val_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n test_dummy = \"a sollicitudin orci sem eget massa. Suspendisse eleifend. 
Cras sed\"\n\n assert autogen_dataset_with_test.train[0][0] == train_dummy\n assert autogen_dataset_with_test.train[0][1] == '6'\n assert len(autogen_dataset_with_test.train) == 80\n\n assert autogen_dataset_with_test.val[0][0] == val_dummy\n assert autogen_dataset_with_test.val[0][1] == '6'\n assert len(autogen_dataset_with_test.val) == 20\n\n assert autogen_dataset_with_test.test[0][0] == test_dummy\n assert autogen_dataset_with_test.test[0][1] == '3'\n assert len(autogen_dataset_with_test.test) == 50", "def get_dataset(args):\n\n if args.dataset == 'cifar':\n data_dir = 'data/cifar/'\n apply_transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n train_dataset = datasets.CIFAR10(data_dir, train=True, download=True,\n transform=apply_transform)\n\n test_dataset = datasets.CIFAR10(data_dir, train=False, download=True,\n transform=apply_transform)\n\n # sample training data amongst users\n if args.iid:\n user_groups = cifar_iid(train_dataset, args.num_users)\n else:\n if args.unequal:\n # Chose euqal splits for every user\n user_groups = cifar_noniid(train_dataset, args.num_users)\n else:\n user_groups = cifar_noniid_class(train_dataset, args.num_users, args.class_per_user)\n \n elif args.dataset == 'mnist' or 'fmnist':\n if args.dataset == 'mnist':\n data_dir = 'data/mnist/'\n else:\n data_dir = 'data/fmnist/'\n\n apply_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])\n\n train_dataset = datasets.MNIST(data_dir, train=True, download=True,\n transform=apply_transform)\n\n test_dataset = datasets.MNIST(data_dir, train=False, download=True,\n transform=apply_transform)\n\n # sample training data amongst users\n if args.iid:\n # Sample IID user data from Mnist\n user_groups = mnist_iid(train_dataset, args.num_users)\n else:\n # Sample Non-IID user data from Mnist\n if args.unequal:\n # Chose uneuqal splits for every user\n user_groups = mnist_noniid_unequal(train_dataset, args.num_users)\n else:\n # Chose euqal splits for every user\n user_groups = mnist_noniid_class(train_dataset, args.num_users, args.class_per_user)\n\n return train_dataset, test_dataset, user_groups", "def get_dataset_name(self):\n raise NotImplementedError", "def load_dataset(self, problem_name=\"\", split=\"train\"):\n\n orm = self.__orm\n username = \"admin\" # should be unused (unless submit new feature to db)\n\n with orm.session_scope() as session:\n if not problem_name:\n problem_name = session.query(Problem.name)\\\n .filter(Problem.name != \"demo\").scalar()\n problem_id = session.query(Problem.id)\\\n .filter(Problem.name == problem_name).scalar()\n\n data_dir = os.path.join(\"/data\", split)\n dataset, entities_featurized, target = load_dataset_from_dir(\n session, data_dir, problem_name)\n\n suffix = \"_\" + split\n\n return problem_name, dataset, entities_featurized, target", "def pre_train(self, dataset, **kwargs):\n\n pass", "def default_builder(self, dataset_name, eval_dataset_name):\n builder = tfds.builder(dataset_name, data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec()\n logging.info('Training on TFDS dataset %s with split %s',\n dataset_name, 'train' + shard_spec)\n train_data = builder.as_dataset(split='train' + shard_spec,\n shuffle_files=self.shuffle_train_files)\n\n if eval_dataset_name is None:\n logging.info('Evaluating on TFDS dataset %s with split %s',\n dataset_name, 'validation' + shard_spec)\n eval_data = 
self.default_eval_builder(builder, shard_spec)\n else:\n eval_dataset, *eval_split = eval_dataset_name.split(':')\n if not eval_split:\n eval_split = 'validation'\n else:\n eval_split = eval_split[0]\n logging.info('Evaluating on TFDS dataset %s with split %s',\n eval_dataset, eval_split + shard_spec)\n eval_builder = tfds.builder(eval_dataset, data_dir=self.data_dir)\n eval_data = eval_builder.as_dataset(split=eval_split + shard_spec,\n shuffle_files=False)\n return train_data, eval_data", "def get_video_dataset(dataset_name, split_name, dataset_dir):\n if dataset_name not in _DATASETS_INFORMATION:\n raise ValueError('The specified dataset is not supported yet.')\n\n splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes\n\n if split_name not in splits_to_sizes:\n raise ValueError('data split name %s not recognized' % split_name)\n\n # Prepare the variables for different datasets.\n num_classes = _DATASETS_INFORMATION[dataset_name].num_classes\n ignore_label = _DATASETS_INFORMATION[dataset_name].ignore_label\n\n file_pattern = _FILE_PATTERN\n file_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\n # Specify how the TF-Examples are decoded.\n keys_to_context_features = {\n 'image/filename': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature(\n (), tf.string, default_value='jpg'),\n 'image/height': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/width': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/segmentation/class/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n 'image/num': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n }\n\n keys_to_sequence_features = {\n 'image/encoded': tf.FixedLenSequenceFeature(\n (), tf.string, default_value=None),\n 'image/segmentation/class/encoded': tf.FixedLenSequenceFeature(\n (), tf.string, default_value=None),\n }\n\n items_to_handlers = {\n 'image_name': tfseqexample_decoder.Tensor('image/filename'),\n 'height': tfseqexample_decoder.Tensor('image/height'),\n 'width': tfseqexample_decoder.Tensor('image/width'),\n 'num': tfseqexample_decoder.Tensor('image/num'),\n }\n\n items_to_handlers_list = {\n 'image': tfseqexample_decoder.Image(\n image_key='image/encoded',\n format_key='image/format',\n repeated=True,\n channels=3),\n 'labels_class': tfseqexample_decoder.Image(\n image_key='image/segmentation/class/encoded',\n format_key='image/segmentation/class/format',\n repeated=True,\n channels=1),\n }\n\n decoder = tfseqexample_decoder.TFSeqExampleDecoder(\n keys_to_context_features, keys_to_sequence_features, items_to_handlers, items_to_handlers_list)\n\n return dataset.Dataset(\n data_sources=file_pattern,\n reader=tf.TFRecordReader,\n decoder=decoder,\n num_samples=splits_to_sizes[split_name],\n items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n ignore_label=ignore_label,\n num_classes=num_classes,\n name=dataset_name,\n multi_label=True)", "def get_dataset(name,\n n_train=None,\n n_test=None,\n permute_train=False,\n do_flatten_and_normalize=True,\n data_dir=None,\n input_key='image'):\n ds_builder = tfds.builder(name + (':3.*.*' if name != 'imdb_reviews' else ''))\n\n ds_train, ds_test = tfds.as_numpy(\n tfds.load(\n name + (':3.*.*' if name != 'imdb_reviews' else ''),\n split=['train' + ('[:%d]' % n_train if n_train is not None else ''),\n 'test' + ('[:%d]' % n_test if n_test is not None else '')],\n batch_size=-1,\n as_dataset_kwargs={'shuffle_files': False},\n data_dir=data_dir))\n\n train_images, train_labels, test_images, 
test_labels = (ds_train[input_key],\n ds_train['label'],\n ds_test[input_key],\n ds_test['label'])\n\n if do_flatten_and_normalize:\n train_images = _partial_flatten_and_normalize(train_images)\n test_images = _partial_flatten_and_normalize(test_images)\n\n num_classes = ds_builder.info.features['label'].num_classes\n train_labels = _one_hot(train_labels, num_classes)\n test_labels = _one_hot(test_labels, num_classes)\n\n if permute_train:\n permutation = np.random.RandomState(0).permutation(train_images.shape[0])\n train_images = train_images[permutation]\n train_labels = train_labels[permutation]\n\n return train_images, train_labels, test_images, test_labels", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def create_dataset(base_path, batch_size, is_train):\n columns_list = [\"feature\", \"label\"]\n num_consumer = 4\n\n if is_train:\n path = os.path.join(base_path, 'aclImdb_train.mindrecord0')\n else:\n path = os.path.join(base_path, 'aclImdb_test.mindrecord0')\n\n data_set = ds.MindDataset(path, columns_list, num_consumer)\n ds.config.set_seed(0)\n data_set = data_set.shuffle(buffer_size=data_set.get_dataset_size())\n data_set = data_set.batch(batch_size, drop_remainder=True)\n return data_set", "def test_training(self):\n\t\tpass", "def run():\r\n \r\n LABEL = data.LabelField(use_vocab=True)\r\n TEXT = data.Field(sequential=True, tokenize=lambda x:x.split(), lower=True, fix_length=config.MAX_LENGTH)\r\n\r\n### 1/5\r\n dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n # split the dataset, 8:2\r\n train_dataset, valid_dataset = dataset.split(split_ratio=[0.8,0.2], random_state=random.getstate())\r\n \r\n test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n \r\n### 2\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n# valid_dataset = data.TabularDataset(path=config.VAL_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n \r\n### 3/4\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# dataset = data.TabularDataset(path=config.TEST_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n# # split the dataset, 5:5\r\n# valid_dataset, test_data = dataset.split(split_ratio=[0.5,0.5], random_state=random.getstate())\r\n\r\n### 5\r\n\r\n\r\n\r\n # load embeddings\r\n vectors_data = load_vectors(config.EMBEDDING_FNAME)\r\n\r\n TEXT.build_vocab(train_dataset, 
vectors=vectors_data)\r\n LABEL.build_vocab(train_dataset)\r\n print ('vector size:',TEXT.vocab.vectors.size())\r\n embedding_pretrained_matrix = TEXT.vocab.vectors\r\n \r\n # create torch device\r\n print(\"To device...\")\r\n USE_CUDA = torch.cuda.is_available()\r\n device = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\r\n\r\n train_it, valid_it = data.BucketIterator.splits((train_dataset, valid_dataset),\r\n batch_sizes=(config.TRAIN_BATCH_SIZE,config.VAL_BATCH_SIZE), \r\n device=device, \r\n sort_key=lambda x: len(x.text),\r\n sort_within_batch=False,\r\n shuffle=True,\r\n repeat=False)\r\n test_it = data.BucketIterator(test_data, \r\n batch_size=config.TEST_BATCH_SIZE, \r\n sort_key=lambda x: len(x.text), \r\n shuffle=False,\r\n device=device)\r\n \r\n \r\n # fetch model\r\n vocab_size = len(TEXT.vocab) # TEXT.vocab.vectors.size()\r\n# pretrained_vec = TEXT.vocab.vectors\r\n \r\n # selecte network \r\n x = import_module('networks.'+config.NETWORK)\r\n model = x.Model(vocab_size,embedding_pretrained=embedding_pretrained_matrix)\r\n \r\n # send model to device\r\n model.to(device)\r\n\r\n # initialize Adam optimizer\r\n optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)\r\n\r\n # if you have multiple GPUs, model model to DataParallel to use multiple GPUs\r\n if torch.cuda.device_count() > 1:\r\n model = nn.DataParallel(model)\r\n \r\n params_list = []\r\n # train and validate for all epochs\r\n for epoch in range(config.EPOCHS):\r\n epoch_start_time = time.time()\r\n\r\n ###----Train--------\r\n train_outputs, train_labels, train_loss = engine.train_fn(train_it, model, optimizer, device)\r\n train_outputs = torch.Tensor(train_outputs)\r\n _, train_predicted = torch.max(train_outputs, dim=1)\r\n train_parameters_dict = metrics_func.performance_evaluation_func(train_predicted,train_labels,epoch=str(epoch))\r\n # save train paremeters\r\n params_list.append(train_parameters_dict)\r\n train_f1 = train_parameters_dict['f1_score_macro']\r\n train_prec = train_parameters_dict['precision_macro']\r\n train_recall = train_parameters_dict['precision_macro']\r\n print('\\n')\r\n print(f\" Train Epoch: {epoch}, F1 = {train_f1},precision = {train_prec},recall = {train_recall}\")\r\n ###------------\r\n \r\n # validate\r\n val_outputs, val_labels, valid_loss = engine.evaluate_fn(valid_it, model, device)\r\n val_outputs = torch.Tensor(val_outputs)\r\n _, val_predicted = torch.max(val_outputs, dim=1) \r\n # calculate evaluation paremeters\r\n val_parameters_dict = metrics_func.performance_evaluation_func(val_predicted, val_labels, epoch=str(epoch),flag='val')\r\n # save evaluation paremeters\r\n params_list.append(val_parameters_dict)\r\n \r\n val_f1 = val_parameters_dict['f1_score_macro']\r\n val_prec = val_parameters_dict['precision_macro']\r\n val_recall = val_parameters_dict['recall_macro']\r\n print(f\"Val Epoch: {epoch},F1 = {val_f1},precision = {val_prec}, recall = {val_recall}\")\r\n \r\n ###-------Test-----------------------\r\n test_outputs, test_labels, test_loss = engine.evaluate_fn(test_it, model, device)\r\n test_outputs = torch.Tensor(test_outputs)\r\n _, test_predicted = torch.max(test_outputs, dim=1) \r\n # calculate evaluation paremeters\r\n test_parameters_dict = metrics_func.performance_evaluation_func(test_predicted, test_labels, epoch=str(epoch),flag='test')\r\n # save evaluation paremeters\r\n params_list.append(test_parameters_dict)\r\n \r\n test_f1 = test_parameters_dict['f1_score_macro']\r\n test_prec = test_parameters_dict['precision_macro']\r\n 
test_recall = test_parameters_dict['recall_macro']\r\n print(f\"test Epoch: {epoch},F1 = {test_f1},precision = {test_prec}, recall = {test_recall}\")\r\n \r\n lr_scheduler = LRScheduler(optimizer)\r\n lr_scheduler(valid_loss)\r\n \r\n \r\n # simple early stopping\r\n# val_f1 = float(val_f1)\r\n #f1 = (float(train_f1) + float(val_f1)) / 2\r\n val_loss = float(valid_loss)\r\n early_stopping(val_loss, model)\r\n if early_stopping.early_stop:\r\n print(\"Early stopping\")\r\n break\r\n # 获得 early stopping 时的模型参数\r\n# model.load_state_dict(torch.load('checkpoint.pt'))\r\n\r\n# save_model_func(model, epoch, path='outputs')\r\n \r\n metrics_func.save_parameters_txt(params_list)", "def get_inference_dataset(dataset_path,debug=False):\n\n if not os.path.exists(dataset_path):\n assert False, \"Couldn't find path : '{}'\".format(dataset_path)\n print(\"\\nprocessing data :'{}'\\n\".format(dataset_path))\n\n path = os.getcwd()\n os.chdir(dataset_path)\n\n dataset = []\n for file in tqdm(os.listdir('.')):\n if not file.endswith('features'):\n continue\n name = file.replace(\".features\", \"\") # removing \"features\"\n x = np.loadtxt(name + '.features')\n np.nan_to_num(x, copy=False)\n #get labels file\n if os.path.exists(name + '.test.labels'):\n labels_file = open(name + '.test.labels').readlines()\n elif os.path.exists(name + '.labels'):\n labels_file = open(name + '.labels').readlines()\n else:\n continue\n file_info = (name , float(labels_file[-2].split(' ')[-1]),\n np.fromstring(labels_file[1].strip(), sep=' ')[:2],\n float(labels_file[2]))#(file name,window_offset,(onset,offset),vot_type)\n\n dataset.append([torch.from_numpy(x).float(), file_info])\n if debug and len(dataset)>100:\n break\n os.chdir(path)\n\n return DataLoader(dataset,shuffle=False)", "def create_train_test(option, transform, params, split=0.2):\r\n clip_im_dir = option.clip_im_dir\r\n matting_dir = option.matting_dir\r\n csv_path = option.csv_path\r\n \r\n print(\"create datasets\")\r\n \r\n \r\n data_df = pd.read_csv(csv_path)\r\n # data_df = MergeDataframe(clip_im_dir, matting_dir)\r\n \r\n #separate data in training and test data (20/80)\r\n train_df, test_df = train_test_split(data_df, test_size=split)\r\n \r\n #search right Dataset class\r\n package_dir = Path(src.dataset.__file__).resolve().parent\r\n\r\n for (_, module_name, _) in iter_modules([package_dir]):\r\n # print(module_name, self.ComType)\r\n if option.dataset.lower() == module_name.lower() :\r\n modelModule = importlib.import_module(\".\"+module_name)\r\n break\r\n \r\n # train data\r\n training_set = modelModule(train_df, clip_im_dir, matting_dir, transform, transform)\r\n train_loader = DataLoader(training_set, **params)\r\n \r\n \r\n #test data\r\n testing_set = modelModule(test_df, clip_im_dir, matting_dir, transform, transform)\r\n test_loader = DataLoader(testing_set, **params)\r\n \r\n return train_loader, test_loader", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = 
get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def run(dataset = 1):\n train_list, test_list = load_list(dataset, False)\n train_imgs = process_list(train_list)\n test_imgs = process_list(test_list)\n with open(os.path.join(WORKING_DIR, 'data', 'train' + str(dataset) + '.txt'), 'w') as f:\n for img in train_imgs:\n f.write(img)\n f.write(' ')\n if img[-14] == 'F':\n f.write('1')\n else:\n f.write('0')\n f.write('\\n')\n with open(os.path.join(WORKING_DIR, 'data', 'test' + str(dataset) + '.txt'), 'w') as f:\n for img in test_imgs:\n f.write(img)\n f.write(' ')\n if img[-14] == 'F':\n f.write('1')\n else:\n f.write('0')\n f.write('\\n')", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes", "def getDataset(self, train=True):\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \n \n if self.dataset == \"ELLIPSE\":\n a = np.array([[0,1.0],[1.0,2.0]]) \n b = a*0.5 \n myE = el.ellipse(device, 500, 100, a, b) \n if train == True:\n return myE.create_dataset(myE.examples)\n return myE.create_dataset(myE.valid) \n \n if self.dataset == \"SWISS\": \n myS = sw.SwissRoll(device, 500, 0.2) \n if train == True:\n return myS.create_dataset(myS.examples)\n return myS.create_dataset(myS.valid)\n \n \n #open file\n myFile = h5py.File(self.dataString, 'r', self.driver)\n \n if train == True: \n inputString = \"train_inputs\"\n labelsString = \"train_labels\"\n \n else:\n inputString = \"test_inputs\"\n labelsString = \"test_labels\"\n \n #get hdf5 datsets\n features = myFile.get(inputString)\n labels = myFile.get(labelsString)\n \n #convert to tensors\n features = torch.from_numpy(np.array(features))\n labels = torch.from_numpy(np.array(labels))\n \n #close file to ensure dataset is in memory\n myFile.close()\n \n #conver to correct datatypes\n features = features.float()\n \n if self.conv_sg == False:\n labels = labels.long() \n \n dataset = torch.utils.data.TensorDataset(features, labels)\n \n return dataset", "def switch_to_train_data(self) -> None:\n if 
self._train_name not in self._datasets:\n raise ValueError(\"Training data not provided.\")\n self.switch_to_dataset(self._train_name)", "def create_dataset():\n ds_train_raw, ds_test_raw = load_raw_datasets()\n vectorize_layer = create_vectorizer(ds_train_raw)\n ds_train = ds_train_raw.map(\n lambda text, label: (vectorize_layer(text), label)\n ).prefetch(tf.data.experimental.AUTOTUNE)\n ds_test = ds_test_raw.map(\n lambda text, label: (vectorize_layer(text), label)\n ).prefetch(tf.data.experimental.AUTOTUNE)\n return ds_train, ds_test, vectorize_layer", "def _createDatasets(self, datasetName):\r\n\r\n # Set the feature vector dtype\r\n if self.featureVectorDType == 'str':\r\n dt = h5py.special_dtype(vlen=str)\r\n else:\r\n dt = self.featureVectorDType\r\n\r\n\t\t# Initialize the datasets\r\n logger.debug('Creating dataset `{}`.'.format(datasetName))\r\n\r\n if self.featureVectorSize > 1:\r\n self.datasets[datasetName] = self.db.create_dataset(str(datasetName), (self.estNumSamples, self.featureVectorSize), maxshape=(None, self.featureVectorSize), dtype=dt)\r\n else:\r\n self.datasets[datasetName] = self.db.create_dataset(str(datasetName), (self.estNumSamples,), maxshape=(None,), dtype=dt)", "def get_dataset(dataset: str, split: str) -> Dataset:\n if dataset == \"imagenet\":\n return _imagenet(split)\n elif dataset == \"imagenet32\":\n return _imagenet32(split)\n elif dataset == \"cifar10\":\n return _cifar10(split)", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def load_data(opt=\"mnist\"):\n if opt == \"mnist\":\n train, test = tf.keras.datasets.mnist.load_data()\n \n x_train, y_train = train\n x_test, y_test = test\n \n x_train = x_train.reshape(x_train.shape[0], 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n \n y_train = y_train.astype(np.int)\n y_test = y_test.astype(np.int)\n for i in range(len(y_train)):\n y_train[i] = 1 if y_train[i] % 2 == 0 else -1\n for i in range(len(y_test)):\n y_test[i] = 1 if y_test[i] % 2 == 0 else -1\n\n elif opt == \"covertype\":\n df = pd.read_csv(\"covtype.data\", header=None)\n x = df.iloc[:, 0:54].values\n y = df[54].values\n for i in range(len(y)):\n y[i] = 1 if y[i] % 2 == 0 else -1\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n \n else:\n logging.error(\"Unknown dataset!!\")\n\n logging.info(\"train data shape: {}\".format(x_train.shape))\n logging.info(\"test data shape: {}\".format(x_test.shape))\n return (x_train, y_train), (x_test, y_test)", "def create_dataset(project, dataset_name):\n dataset = dataset_name\n get_dataset = project.datasets.get(dataset_name=dataset)\n project.datasets.create(dataset_name=dataset_name)\n \n return get_dataset", "def dataset(request):\n X, y = make_classification(\n n_samples=700, n_features=10, n_informative=8, n_redundant=2,\n n_classes=2, n_clusters_per_class=2, random_state=6483\n )\n\n request.cls.dataset = Dataset(X, y)", "def prepare_dataset(dataset_name='MNIST', data_path='data', val_size=0.1,\n task_pairs=[(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)],\n truncate_size=None,\n train_transform=None,\n val_transform=None,\n test_transform=None):\n if dataset_name == 'MNIST':\n dataset_class = datasets.MNIST\n elif dataset_name == 'Fashion-MNIST':\n dataset_class = datasets.FashionMNIST\n elif dataset_name == 'KMNIST':\n dataset_class = 
datasets.KMNIST\n elif dataset_name == 'CIFAR10':\n dataset_class = datasets.CIFAR10\n elif dataset_name == '_CPU_224': # for imitating 224x224 data\n tasked_train = _CPUDataset(in_ch=3, height=224)\n tasked_val = _CPUDataset(in_ch=3, height=224)\n tasked_test = _CPUDataset(in_ch=3, height=224)\n return tasked_train, tasked_val, tasked_test\n else:\n raise NotImplementedError\n\n data_train = dataset_class(root=data_path, train=True,\n download=True)\n\n X, y = data_train.data / 255., data_train.targets\n if dataset_name == 'CIFAR10':\n X = torch.FloatTensor(X).permute(0, 3, 1, 2)\n y = torch.LongTensor(y)\n\n X_train, X_val, y_train, y_val = train_test_split(\n X, y, test_size=val_size, stratify=y, random_state=42)\n\n tasked_train = TaskedDataset(X_train, y_train,\n all_tasks=task_pairs,\n truncate_size=truncate_size,\n transform=train_transform)\n tasked_val = TaskedDataset(X_val, y_val,\n all_tasks=task_pairs,\n truncate_size=truncate_size,\n transform=val_transform)\n\n data_test = dataset_class(root=data_path, train=False,\n download=True)\n X_test, y_test = data_test.data / 255., data_test.targets\n if dataset_name == 'CIFAR10':\n X_test = torch.FloatTensor(X_test).permute(0, 3, 1, 2)\n y_test = torch.LongTensor(y_test)\n\n tasked_test = TaskedDataset(X_test, y_test,\n all_tasks=task_pairs,\n truncate_size=truncate_size,\n transform=test_transform)\n\n return tasked_train, tasked_val, tasked_test", "def load(dataset_name='processed_500maxnode'):\n # data_dir = os.path.join('../data/modelnet40_ply_hdf5_2048')\n # assert os.path.exists(data_dir)\n\n processed_data_dir = os.path.join(DATA_DIR, dataset_name)\n if not os.path.exists(processed_data_dir):\n os.makedirs(processed_data_dir)\n\n if len(os.listdir(processed_data_dir)) != 0:\n\n # print(\"Loading Saved Data from Disk.......\")\n\n \"\"\" pre-defined location for saving the train and test data\"\"\"\n train_dir = os.path.join(processed_data_dir, 'train')\n test_dir = os.path.join(processed_data_dir, 'test')\n\n train, max_node = load_back_from_disk(data_dir=train_dir, istrain=True)\n test, max_node_test = load_back_from_disk(data_dir=test_dir, istrain=False)\n max_node = max(max_node, max_node_test)\n\n else:\n train, test, max_node = featurize(\n processed_data_dir,\n shard_size=16)\n\n return train, test, max_node", "def test_full_dataset_from_file(full_dataset):\n train_dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. Proin sed\"\n val_dummy = \"malesuada. Integer id magna et ipsum cursus vestibulum. 
Mauris magna.\"\n\n assert full_dataset.train[0][0] == train_dummy\n assert full_dataset.train[0][1] == '6'\n\n assert full_dataset.val[0][0] == val_dummy\n assert full_dataset.val[0][1] == '8'\n\n assert full_dataset[0][0] == train_dummy\n assert full_dataset[100][0] == val_dummy", "def get_train_test(input_dataset,ylabels):\n train_size = 0.7\n test_size = 1-train_size\n stratified_split = StratifiedShuffleSplit(ylabels,test_size=test_size,n_iter=1,random_state=77)\n\n for train_indx,test_indx in stratified_split:\n train = [input_dataset[i] for i in train_indx]\n train_y = [ylabels[i] for i in train_indx]\n \n test = [input_dataset[i] for i in test_indx]\n test_y = [ylabels[i] for i in test_indx]\n return train,test,train_y,test_y", "def load_data_and_labels(filename, dataset_name,is_train):\n label_count={}\n parameter_file = \"./parameters.json\"\n params = json.loads(open(parameter_file).read())\n if dataset_name == 'ag_news' or dataset_name == 'dbpedia' or dataset_name == 'sogou_news' or dataset_name == 'amazon_review_full' or dataset_name == 'amazon_review_polarity' :\n df = pd.read_csv(filename, names=['label', 'title', 'text'], dtype={'title': object,'text': object})\n selected = ['label', 'title','text','too_short','to_drop']\n\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[2]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df[selected[2]].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df[selected[2]].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n \n elif dataset_name == 'yelp_review_full' or dataset_name == 'yelp_review_polarity':\n df = pd.read_csv(filename, names=['label','text'], dtype={'text': object})\n selected = ['label','text','too_short','to_drop']\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[1]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = 
np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['text'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['text'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n elif dataset_name == 'yahoo_answers':\n df = pd.read_csv(filename, names=['label', 'title', 'content','answer'], dtype={'title': object,'answer': object,'content': object})\n selected = ['label', 'title','content','answer','too_short','to_drop'] \n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['temp'] = df[['content','answer']].apply(lambda x: ' '.join(str(v) for v in x), axis=1)\n df['too_short']= df['temp'].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['temp'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['temp'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n return x_raw, y_raw, df, labels", "def dataset_by_name(name):\n return _datasets[name.lower()]", "def get_dataset(dataset_name, split_type, validation_percent=20,\n batch_size=None, preprocess_fn=None):\n\n if split_type == DatasetSplit.TRAIN:\n split = \"train[{}%:]\".format(validation_percent)\n elif split_type == DatasetSplit.VALID:\n split = \"train[:{}%]\".format(validation_percent)\n elif split_type == DatasetSplit.TEST:\n split = \"test\"\n elif split_type == DatasetSplit.TRAIN_FULL:\n split = \"train\"\n else:\n raise ValueError(\"Unknown split_type {}\".format(split_type))\n\n ds = tfds.load(name=dataset_name, split=split,\n as_dataset_kwargs={\"shuffle_files\": False}\n ).shuffle(1000, seed=17)\n\n if split_type in [DatasetSplit.TRAIN, DatasetSplit.TRAIN_FULL]:\n ds = ds.repeat()\n\n ds = ds.map(preprocess_fn, num_parallel_calls=batch_size)\n\n return (ds.batch(\n batch_size, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE))", "def get_dataset_simple(dataset_name,\n add_valid_to_vocab=False,\n add_test_to_vocab=False,\n to_lower_case=True,\n tok_fn=word_tokenize,\n **kwargs):\n loader = get_loader(dataset_name, **kwargs)\n return get_simple_splits(loader,\n add_valid_to_vocab=add_valid_to_vocab,\n add_test_to_vocab=add_test_to_vocab,\n tok_fn=tok_fn,\n to_lower_case=to_lower_case)", "def dataset_name(self):\n return self.dataset.name", "def _create_model_and_data(\n dataset_name: str, use_synthetic_data: bool\n) -> Tuple[constants.ModelFnType, constants.FederatedDatasetsType,\n constants.ProcessFnType, constants.SplitDataFnType, str]:\n if dataset_name == 'emnist':\n return 
emnist.create_model_and_data(\n num_local_epochs=_TRAIN_EPOCHS.value,\n train_batch_size=_TRAIN_BATCH_SIZE.value,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'stackoverflow':\n return stackoverflow.create_model_and_data(\n num_local_epochs=_TRAIN_EPOCHS.value,\n train_batch_size=_TRAIN_BATCH_SIZE.value,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'landmark':\n return landmark.create_model_and_data(\n num_local_epochs=_TRAIN_EPOCHS.value,\n train_batch_size=_TRAIN_BATCH_SIZE.value,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'ted_multi':\n return ted_multi.create_model_and_data(\n num_local_epochs=_TRAIN_EPOCHS.value,\n train_batch_size=_TRAIN_BATCH_SIZE.value,\n use_synthetic_data=use_synthetic_data)\n raise ValueError(f'Accepted dataset names: {constants.DATASET_NAMES}, but '\n f'found {dataset_name}. Please provide a valid name.')", "def load_dataset_train():\n df_train = load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values", "def train(self, dataset): \n self.random_forest.fit(dataset[:,:-1], dataset[:,-1])", "def train_and_test(self, data):\n\n np.random.shuffle(data)\n datalist = self.unpack_data(data)\n\n logger.info('[*] 75-25 partition of datasets ...')\n\n markline1 = math.floor(0.75*(len(datalist['features'])))\n markline2 = math.floor(0.75*len(datalist['labels']))\n\n train_features = datalist['features'][:(markline1)]\n test_features = datalist['features'][(markline1):]\n \n train_labels = datalist['labels'][:(markline2)]\n test_labels = datalist['labels'][(markline2):]\n\n logger.info('[*] Training started with 75% Dataset ...')\n\n self.knn_model.fit(train_features, train_labels)\n\n logger.info('[*] Testing started with 25% Dataset ...')\n print('\\n/---------------Accuracy----------------/') \n \n accuracy = self.knn_model.score(train_features, train_labels)\n print('Test set accuracy {:.2f} %'.format(accuracy*100))\n\n if accuracy < 0.40:\n logger.warning('[-.-!] 
Thanks for tryin\\' but this machine ain\\'t learning.')\n\n return True", "def _process_dataset(all_train_img, all_train_label, all_test_img, all_test_label):\n # Read all training and test images and set the correct path\n train_files = tf.io.gfile.listdir(all_train_img)\n test_files = tf.io.gfile.listdir(all_test_img)\n all_train_class_path = [os.path.join(all_train_img, f) for f in train_files]\n all_test_img_path = [os.path.join(all_test_img, f) for f in test_files]\n # Since Labels start at 1, substract -1 for correct indices with starting '0'\n label_np_test = read_labels_txt(all_test_label) - 1\n synsets_np_train = read_labels_mat(all_train_label)\n\n all_train_img_path = []\n label_np_train = []\n for folder in all_train_class_path:\n img_class_files = tf.io.gfile.listdir(folder)\n synset = os.path.basename(os.path.normpath(folder))\n label_train = synsets_np_train.index(synset)\n for f in img_class_files:\n all_train_img_path.append(os.path.join(folder, f))\n label_np_train.append(label_train)\n\n # Create the Datasets for training and test images with corresponding labels\n path_ds_train = tf.data.Dataset.from_tensor_slices((all_train_img_path, label_np_train))\n img_label_ds_train = path_ds_train.map(_process_image)\n path_ds_test = tf.data.Dataset.from_tensor_slices((all_test_img_path, label_np_test))\n img_label_ds_test = path_ds_test.map(_process_image)\n\n print(img_label_ds_train)\n print(img_label_ds_test)\n\n # Check an example image if necessary\n # example, = img_label_ds_test.take(1)\n for i in range(5):\n example, = img_label_ds_train.take(1)\n image, label = example[0], example[1]\n plt.figure(i)\n if image.shape[2] == 1:\n plt.imshow(tf.squeeze(image), cmap='gray')\n else:\n plt.imshow(image/255)\n print(\"Label: {}\".format(label.numpy()))\n plt.show()\n\n return img_label_ds_train, img_label_ds_test", "def get_dataset(self, purpose):\n if purpose == 'train':\n return self._get_dataset_from_generator(self._get_train_generator)\n elif purpose == 'validation':\n return self._get_dataset_from_generator(self._get_validation_generator)\n elif purpose == 'test':\n return self._get_dataset_from_generator(self._get_train_generator)\n else:\n return ValueError('Purpose should be either \\'train\\', \\'validation\\', or \\'test\\'')", "def run(dataset_dir,pic_path):\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n\n training_filename = _get_output_filename(dataset_dir, 'train')\n testing_filename = _get_output_filename(dataset_dir, 'test')\n\n if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):\n print('Dataset files already exist. 
Exiting without re-creating them.')\n return\n\n class_names = os.listdir(pic_path)\n labels_to_class_names = dict(zip(class_names,range(len(class_names))))\n \n picnames=[]\n for label in class_names:\n alabel_path=os.path.join(pic_path,label)\n names=os.listdir(alabel_path)\n picnames.extend([os.path.join(alabel_path,name) for name in names])\n random.shuffle(picnames) \n \n train_picnames = picnames[:int(0.7*len(picnames))]\n test_picnames = picnames[int(0.7*len(picnames)):]\n # First, process the training data:\n with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:\n offset = 0\n for name in train_picnames:\n offset = _add_to_tfrecord(name, tfrecord_writer, labels_to_class_names, offset)\n\n # Next, process the testing data:\n with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:\n offset = 0\n for name in test_picnames:\n offset = _add_to_tfrecord(name, tfrecord_writer, labels_to_class_names, offset)\n\n # Finally, write the labels file:\n labels_to_class_names = dict(zip(labels_to_class_names.values(),labels_to_class_names.keys())) \n dataset_utils.write_label_file(labels_to_class_names, dataset_dir)\n with open(os.path.join(dataset_dir,'info.json'),'w') as f:\n info=json.dumps({'num_class':len(class_names),'num_sample_train':len(train_picnames),'num_sample_test':len(test_picnames)})\n f.write(info)\n\n print('\\nFinished converting the dataset in the {}!'.format(pic_path))\n print('\\nThe tfrecord files,info.json and labels file is located in the {}'.format(dataset_dir))", "def read_data_sets(data_path, fake_data=False, one_hot=False,\n validation_size=5000, source_url={},\n augment=False,\n percentage_train=100.,\n unbalance=False, unbalance_dict={\"percentage\": 20, \"label1\": 0, \"label2\": 8},\n ):\n\n class DataSets(object):\n pass\n\n data_sets = DataSets()\n\n if fake_data:\n data_sets.train = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.validation = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.test = DataSet([], [], fake_data=True, one_hot=True)\n return data_sets\n\n if not source_url: # empty string check\n if 'fashion' in data_path:\n source_url = DEFAULT_SOURCE_URL_FASHION\n else:\n source_url = DEFAULT_SOURCE_URL_MNIST\n\n if 'fashion' in data_path or 'mnist' in data_path: # mnist or fashion\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_mnist(data_path, validation_size, source_url, one_hot)\n reshape = True\n else:\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_medical_data(data_path)\n reshape = False\n\n # add random permutation to train & validation\n np.random.seed(42)\n\n n_train = train_images.shape[0]\n perm = np.random.permutation(n_train)\n train_images = train_images[perm]\n train_labels = train_labels[perm]\n\n n_val = val_images.shape[0]\n perm = np.random.permutation(n_val)\n val_images = val_images[perm]\n val_labels = val_labels[perm]\n\n # For experiments with data-augmentation\n if augment:\n if 'fashion' in data_path: # rotations +-10 and horizontal flips\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=True)\n elif 'mnist' in data_path: # rotations +-10\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=False)\n train_images = np.concatenate([train_images, np.expand_dims(augmented_images, 3)])\n train_labels = np.concatenate([train_labels, augmented_labels])\n # for the medical datasets, you can use the \"augment\" argument while 
doing patch extraction\n\n # For experiments with limited amount of data\n if percentage_train != 100.:\n train_size = int(0.01*percentage_train*train_images.shape[0])\n Xtrain_images, Xval_images, ytrain, yval = train_test_split(train_images, train_labels, train_size=train_size)\n train_images = Xtrain_images\n train_labels = ytrain\n\n # For experiments with class-imbalance distribution\n if unbalance:\n n_classes = len(np.unique(np.argmax(train_labels, 1)))\n reduceto = 0.01*unbalance_dict['percentage']\n label1 = unbalance_dict['label1']\n label2 = unbalance_dict['label2']\n\n pick_ids = []\n newsize = 0\n all_classes = np.arange(0, n_classes)\n all_classes = np.delete(all_classes, np.where(all_classes == label1)[0])\n all_classes = np.delete(all_classes, np.where(all_classes == label2)[0])\n\n for lab in [label1, label2]:\n allids = np.where(np.argmax(train_labels, 1) == lab)[0]\n selectedids = np.random.choice(allids, int(reduceto * allids.shape[0]), replace=False)\n pick_ids.append(selectedids)\n newsize += len(selectedids)\n\n new_ids = convert_list_to_array(pick_ids, newsize)\n\n other_ids = []\n othersize = 0\n for lab in all_classes.tolist():\n selectedids = np.where(np.argmax(train_labels, 1) == lab)[0]\n other_ids.append(selectedids)\n othersize += len(selectedids)\n\n keep_ids = convert_list_to_array(other_ids, othersize)\n\n # new_ids: contains the indices of the reduced (imbalance) classes\n # keep_ids: contains the indices of the rest (keep the same class distribution)\n resulting_ids = np.concatenate((new_ids, keep_ids))\n np.random.shuffle(resulting_ids)\n\n train_images = train_images[resulting_ids, ...]\n train_labels = train_labels[resulting_ids, ...]\n\n data_sets.train = DataSet(train_images, train_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.validation = DataSet(val_images, val_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.test = DataSet(test_images, test_labels, fake_data=True, one_hot=True, reshape=reshape)\n\n return data_sets", "def read_in_and_split_data(iris_data):\n iris_data = datasets.load_iris()\n data = iris_data['data']\n targets = iris_data['target']\n train_data, test_data, train_targets, test_targets = train_test_split(data, targets, test_size=0.1) \n return (train_data, test_data, train_targets, test_targets)", "def test_data_set_load(data_set):\n data = data_set()\n train_data, train_labels, test_data, test_labels = data.load_data()\n\n assert len(train_data) > 0\n assert len(test_data) > 0\n assert len(train_data) == len(train_labels)\n assert len(test_data) == len(test_labels)", "def get_dataset(name, *, tfds_data_dir=None, seed=547):\n\n kwargs = {}\n kwargs['tfds_data_dir'] = tfds_data_dir\n name_prefix = name\n\n if name_prefix not in ['lsun', *SimpleDataset.DATASET_NAMES]:\n kwargs['seed'] = seed\n\n if name_prefix not in DATASETS:\n raise ValueError(\"Dataset %s is not available.\" % name)\n\n return DATASETS[name_prefix](**kwargs)", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def dtrain(directory):\n return dataset(directory, 'train-images-idx3-ubyte',\n 'train-labels-idx1-ubyte')", "def train(self, dataset = \"Amazon\", top_words=10000):\n assert dataset in datasets, 'Dataset should be in that list ' + str(datasets)\n if dataset == 'Amazon':\n X_train, y_train = load_dataset('dataset/amazonreviews/data', self.nb_lines_amazon)\n else:\n (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)\n raise Exception('Dead code... 
This should be retest again')", "def train(train_set, test_set, train_label, test_label, data_name, test_filenames, dimension_reduce=False,\n distribute_training=False):\n train_set = np.array(train_set)\n test_set = np.array(test_set)\n\n print(\"The shape of training set before dimension reduction is {0}\".format(train_set.shape))\n print(\"The shape of test set before dimension reduction is {0}\".format(test_set.shape))\n print('Use distribute training ? >> {0}'.format(distribute_training))\n reg = linear_model.BayesianRidge()\n\n if dimension_reduce:\n pca = PCA(n_components=128)\n train_set = pca.fit_transform(train_set)\n test_set = pca.fit_transform(test_set)\n\n print(\"The shape of training set after dimension reduction is {0}\".format(train_set.shape))\n print(\"The shape of test set after dimension reduction is {0}\".format(test_set.shape))\n\n if not distribute_training:\n reg.fit(train_set, train_label)\n else:\n train_set, test_set, train_label, test_label = da.array(train_set), da.array(test_set), da.array(\n train_label), da.array(test_label)\n reg.fit(train_set, train_label)\n\n predicted_label = reg.predict(test_set)\n mae_lr = round(mean_absolute_error(test_label, predicted_label), 4)\n rmse_lr = round(math.sqrt(mean_squared_error(test_label, predicted_label)), 4)\n pc = round(np.corrcoef(test_label, predicted_label)[0, 1], 4)\n print('===============The Mean Absolute Error of Model is {0}===================='.format(mae_lr))\n print('===============The Root Mean Square Error of Model is {0}===================='.format(rmse_lr))\n print('===============The Pearson Correlation of Model is {0}===================='.format(pc))\n\n mkdirs_if_not_exist('./model')\n joblib.dump(reg, './model/BayesRidge_%s.pkl' % data_name)\n print('The regression model has been persisted...')\n\n mkdirs_if_not_exist('./result')\n\n out_result(test_filenames, predicted_label, test_label, None, path='./result/Pred_GT_{0}.csv'.format(data_name))\n\n df = pd.DataFrame([mae_lr, rmse_lr, pc])\n df.to_csv('./result/%s.csv' % data_name, index=False)\n print('The result csv file has been generated...')", "def datasets(self):\n return [Dataset.ENSEMBL]", "def load_raw_dataset(name):\n assert name in VALID_NAMES, 'Invalid data set requested. 
Please make sure name is one of ' + ', '.join(VALID_NAMES) + '.'\n\n os.makedirs('downloads', exist_ok=True)\n path = os.path.join('downloads', name)\n path_raw = os.path.join(path, 'raw')\n\n if name == 'iris':\n prep_path(path)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', path_raw)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.names', path_raw)\n return pd.read_csv(os.path.join(path_raw, 'iris.data'), names=['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid', 'species'])\n\n elif name == 'wine':\n prep_path(path)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', path_raw)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.names', path_raw)\n return pd.read_csv(os.path.join(path_raw, 'wine.data'), names=['class',\n 'alcohol',\n 'malic_acid',\n 'ash',\n 'alkalinity',\n 'magnesium',\n 'phenols',\n 'flavanoids',\n 'nonflavanoid_phenols',\n 'proanthocyanins',\n 'color_intensity',\n 'hue',\n 'dilution',\n 'proline'])\n\n elif name == 'titanic':\n import kaggle; kaggle.api.authenticate()\n prep_path(path)\n if len(os.listdir(path_raw)) == 0:\n kaggle.api.competition_download_files('titanic', path_raw)\n titanic = pd.read_csv(os.path.join(path_raw, 'train.csv'))\n titanic_test = pd.read_csv(os.path.join(path_raw, 'test.csv'))\n return titanic, titanic_test\n\n elif name == 'lanl':\n import kaggle; kaggle.api.authenticate()\n prep_path(path)\n if len(os.listdir(path)) == 0:\n kaggle.api.competition_download_files('LANL-Earthquake-Prediction', path_raw)\n if not os.path.exists(os.path.join(path_raw, 'test')):\n zip_ref = zipfile.ZipFile(os.path.join(path_raw, 'test.zip'), 'r')\n zip_ref.extractall(os.path.join(path_raw, 'test'))\n zip_ref.close()\n return pd.read_csv(os.path.join(path_raw, 'train.csv.zip'))\n\n elif name == 'MNIST':\n mnist = torchvision.datasets.MNIST('downloads', train=True, download=True)\n mnist_test = torchvision.datasets.MNIST('downloads', train=False, download=True)\n return mnist, mnist_test\n\n elif name == 'FashionMNIST':\n fmnist = torchvision.datasets.FashionMNIST('downloads', train=True, download=True)\n fmnist_test = torchvision.datasets.FashionMNIST('downloads', train=False, download=True)\n return fmnist, fmnist_test", "def load_data_fashion_mnist(batch_size, resize=None): #@save\n (x_train, y_train), (x_test,y_test) = tf.keras.datasets.fashion_mnist.load_data()\n x_train,x_test = x_train/255, x_test/255\n print(y_train.shape)\n # Divide all numbers by 255 so that all pixel values are between\n # 0 and 1, add a batch dimension at the last. 
And cast label to int32\n train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n train_dataset = train_dataset.shuffle(1000).batch(batch_size)\n test_dataset = test_dataset.batch(batch_size)\n return train_dataset,test_dataset", "def load_train_data(batch_size,\n data_path='',\n dataset_name='cifar10',\n n_data=50000,\n randomize_labels=False):\n if not data_path:\n train_dataset = tfds.load(\n name=dataset_name, split='train', as_supervised=True)\n else:\n if 'tiny' in data_path:\n train_dataset = tfds.load(\n name=dataset_name, split='train[:6%]', as_supervised=True)\n elif 'half' in data_path:\n train_dataset = tfds.load(\n name=dataset_name, split='train[:50%]', as_supervised=True)\n else:\n train_dataset = tfds.load(\n name=dataset_name, split='train[:25%]', as_supervised=True)\n\n if randomize_labels:\n all_labels = []\n all_images = []\n for images, labels in train_dataset:\n all_labels.extend([labels.numpy()])\n all_images.append(images.numpy()[np.newaxis, :, :, :])\n all_images = np.vstack(all_images)\n np.random.seed(FLAGS.copy)\n np.random.shuffle(all_labels)\n train_dataset = tf.data.Dataset.from_tensor_slices(\n (tf.convert_to_tensor(all_images, dtype=tf.float32),\n tf.convert_to_tensor(all_labels, dtype=tf.int64)))\n\n train_dataset = train_dataset.shuffle(buffer_size=n_data)\n train_dataset = train_dataset.map(\n functools.partial(preprocess_data, is_training=True),\n num_parallel_calls=tf.data.AUTOTUNE)\n train_dataset = train_dataset.batch(batch_size, drop_remainder=True)\n train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return train_dataset", "def test_train_dataset(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n expected = [\n {'alpha': 0.6931471805599453,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 1.3},\n {'alpha': 0.9729550745276565,\n 'dim': 1,\n 'inequal': 'lt',\n 'threshold': 1.0},\n {'alpha': 0.8958797346140273,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 0.90000000000000002}\n ]\n self.assertEqual(classifiers, expected)", "def create_1st_dataset_rtacltest1(driver, dataset_name):\n assert wait_on_element(driver, 5, '//tr[contains(.,\"tank\")]//mat-icon[text()=\"more_vert\"]', 'clickable')\n driver.find_element_by_xpath('//tr[contains(.,\"tank\")]//mat-icon[text()=\"more_vert\"]').click()\n assert wait_on_element(driver, 4, '//button[normalize-space(text())=\"Add Dataset\"]', 'clickable')\n driver.find_element_by_xpath('//button[normalize-space(text())=\"Add Dataset\"]').click() \n assert wait_on_element(driver, 5, '//h3[text()=\"Add Dataset\"]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Name\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Name\"]').send_keys(dataset_name)\n assert wait_on_element(driver, 5, '//mat-select[@ix-auto=\"select__Share Type\"]')\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Share Type\"]').click()\n assert wait_on_element(driver, 5, '//mat-option[@ix-auto=\"option__Share Type_SMB\"]', 'clickable')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Share Type_SMB\"]').click()\n assert wait_on_element(driver, 5, '//button[@ix-auto=\"button__SAVE\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__SAVE\"]').click()", "def load_mnist(dataset_name='mnist', **kwargs):\n dataset_name 
= dataset_name.strip().lower().replace('minist', 'mnist')\n\n if dataset_name.lower() not in ['mnist', 'fashion-mnist']:\n raise ValueError('Only mnist or fashion-mnist are valid dataset_name.')\n\n base = 'http://yann.lecun.com/exdb/mnist/'\n if dataset_name == 'fashion-mnist':\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n\n dirname = os.path.join(_trident_dir, dataset_name)\n make_dir_if_need(dirname)\n\n \"\"\"Load MNIST data from `path`\"\"\"\n trainData = None\n testData = None\n for kind in ['train', 'test']:\n labels_file = '{0}-labels-idx1-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n images_file = '{0}-images-idx3-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n # if dataset_name == 'emnist' :\n # labels_file='emnist-balanced-'+labels_file\n # images_file = 'emnist-balanced-' + images_file\n\n is_data_download = download_file(base + labels_file, dirname, labels_file, dataset_name + '_labels_{0}'.format(kind))\n is_label_download = download_file(base + images_file, dirname, images_file, dataset_name + '_images_{0}'.format(kind))\n if is_data_download and is_label_download:\n labels_path = os.path.join(dirname, labels_file)\n images_path = os.path.join(dirname, images_file)\n labeldata = None\n imagedata = None\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\n labels = np.squeeze(labels).astype(np.int64)\n labeldata = LabelDataset(labels.tolist(),object_type=ObjectType.classification_label)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16)\n images = np.reshape(images, (len(labels), 784)).astype(dtype=_session.floatx)\n images = np.reshape(images, (-1, 28, 28))\n imagedata = ImageDataset(images, object_type=ObjectType.gray)\n if kind == 'train':\n trainData = Iterator(data=imagedata, label=labeldata)\n else:\n testData = Iterator(data=imagedata, label=labeldata)\n\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] if dataset_name == 'mnist' else ['T-shirt/top', 'Trouser', 'Pullover',\n 'Dress', 'Coat', 'Sandal', 'Shirt',\n 'Sneaker', 'Bag', 'Ankle boot'],\n 'en-US')\n\n return dataset\n return None", "def pre_train_person(self, dataset, **kwargs):\n \n pass" ]
[ "0.7228631", "0.70241654", "0.70175004", "0.70087993", "0.6977617", "0.6900802", "0.68130064", "0.6774054", "0.67727417", "0.67533123", "0.673811", "0.67380196", "0.6732079", "0.6708", "0.6619651", "0.6580174", "0.65409255", "0.6507022", "0.6495979", "0.6469288", "0.64399636", "0.6430534", "0.64198136", "0.6419272", "0.6416908", "0.6404549", "0.63958627", "0.63842446", "0.63745505", "0.63723415", "0.6369405", "0.6360896", "0.63607216", "0.63385123", "0.633764", "0.6328929", "0.63232565", "0.63232565", "0.63170505", "0.6307923", "0.6306626", "0.6287282", "0.6270653", "0.6254663", "0.62469196", "0.6246451", "0.6234785", "0.6231411", "0.6227634", "0.6215897", "0.6214375", "0.6210371", "0.62052965", "0.6203223", "0.6201904", "0.61985683", "0.61985683", "0.6195917", "0.6180638", "0.6179708", "0.6179627", "0.6179395", "0.6175689", "0.617264", "0.61643606", "0.6160177", "0.61552984", "0.6154384", "0.6154202", "0.61435753", "0.6137506", "0.6116254", "0.6111997", "0.6106134", "0.6105421", "0.60960424", "0.6092703", "0.6087021", "0.60828525", "0.60823953", "0.6081129", "0.607478", "0.6070845", "0.60668796", "0.6063693", "0.60625696", "0.60612506", "0.6053987", "0.6053173", "0.6053117", "0.6049725", "0.6049355", "0.6046537", "0.60406405", "0.60377705", "0.60265386", "0.6025807", "0.6014338", "0.6013667", "0.60114014", "0.60061955" ]
0.0
-1
Return the number of dimension of the state space
def getStatesDim(self): return 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state_space_dimension(cls) -> int:\n return 1", "def n_dim(self):\n return self._n_dim", "def num_dim(self):\n return len(self._dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def _get_observation_dimension(self):\n return len(self._get_observation_np())", "def dimension(self):\n return self.__N", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def _get_state_sizes(self):\n ds = self.builder.nodes[self.ds_inputs[0]]\n return [[ds.xdim]]", "def dimensionality(self):\n return int(self.nDims)", "def dim(self) -> int:\n return self._n_dim", "def dimension_count(self):\n return self._dimensionCount", "def getNumDimensions(self):\n return len(self.di.keys())", "def dim(self):\n return len(self._n)", "def dim(self):\n return len(self.shape)", "def dim(self):\n return len(self.shape)", "def n_dims(self):\n return len(self.dimensions)", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def dim(self) -> int:", "def size(self):\n\t\treturn self.dims", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def num_dims(self):\n return self.h5['{}/{}'.format(SETTINGS, N_DIMS_STR)][()]", "def state_size(self):\n # estimated state, its covariance, and the step number\n return [[self.dim_x], [self.dim_x * self.dim_x], [1]]", "def dims(self):\n return tuple(d for d in (v.states for v in self.__vars)) if len(self.__vars) else (1,)", "def dim(self) -> int:\n pass", "def get_dimension_length(self):\n pass", "def _get_ndim(self):\n return len(self.level_shapes[0])", "def count_dims(da):\n return len(da.dims)", "def dim(self) -> int:\n return self.atoms.shape[:-1]", "def state_size(self):\n\t\treturn (\n\t\t\ttf.TensorShape([self.args[\"kb_node_max_len\"], self.args[\"mp_state_width\"]]),\n\t\t)", "def state_size(self):\n return [tf.TensorShape([self.dmodel]),tf.TensorShape([self.dmodel]),tf.TensorShape([self.dmodel])]", "def dimension_size(self):\n return self._dim", "def dim(self):\n return self.ambient_dim() - self.n_equations()", "def dimensions():", "def dim(self):\n\t\treturn self.D", "def get_dimension_number(self) -> int:\n return np.squeeze(self._channel_arrays[0]).ndim", "def dim(self):\n return self.__dim__", "def __len__(self):\n return self.n_node.shape[0]", "def state_size(self):\n raise NotImplementedError(\"Please implement this method\")", "def n_dims(self):\n return self.pdm.n_dims", "def N(self):\n return self._dimensions", "def dimension(self):\n\t\treturn self.d", "def dimension(self):\n return self._dim", "def getDimension(self):\n return len(self.components)", "def dim(self):\n return self._dim", "def dimensionality(self):\n if self.vector.shape is ():\n return 0\n if len(self.vector.shape) is 1:\n return 1\n _, dim = self.vector.shape\n return dim", "def dim(self):\n return self._d", "def xdim(self):\n return len(self._x)", "def ndims(x):\n return len(x.get_shape())", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def dims(x):\n return len(x.shape)", "def dim(self):\n return self.m, self.n", "def get_in_dim(self) -> int:\n return self.in_dim", "def get_input_dim(self) -> int:\n raise NotImplementedError", "def ndim(self):\n if self._ndim is None:\n self._ndim = self.get_mesh_dimension()\n\n return self._ndim", "def num_dimensions(self):\n if self.__num_dimensions__ == 0:\n # Try to get the number of dimensions from the first point or bounding box\n if len(self.points) > 0:\n self.__num_dimensions__ = len(self.points[0].coordinate)\n elif 
len(self.bounding_boxes) > 0:\n self.__num_dimensions__ = len(self.bounding_boxes[0].start)\n return self.__num_dimensions__", "def get_dims(self):\n row_lbl, col_lbl = self.get_idxvals()\n return len(row_lbl), len(col_lbl)", "def dimension(self):", "def ndim(self):\n return self.initial_value.ndim", "def dimension(self):\n return np.prod(np.asarray(self.subsystem_dims))", "def ndim(self):\n # type: () -> int\n return len(self.shape)", "def outdim(self):\n return len(self.getSensors())", "def get_dim(self, name):\n return len(self.root_group.dimensions[name])", "def ndim(self):\n return len(self._shape)", "def get_num_states(self):\n return self.n_states", "def dim(self):\n return (self.n, )", "def width(self) -> int:\n return self._obj[self.x_dim].size", "def __len__(self):\n i = 0\n for S in self.states():\n i += 1\n return i", "def dimensions(self) -> int:\n return pulumi.get(self, \"dimensions\")", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def ndim(self):\n return len(self.shape)", "def n_dims(self):\n return self.model.template_instance.n_dims", "def ndim(self):\n return self._ndim", "def ndim(self):\n return self.__value.ndim", "def get_dim(self):\n return self.dim", "def get_dim(self):\n return self._dim", "def size(self)->int:\n\n return np.prod([axes if is_integer(axes) else len(axes) for axes in self._dim_axes])", "def get_dimensionality(self) -> int:\n return self.dimensionality", "def get_dimensionality(p_state, idx_image=-1, idx_chain=-1):\n return int(_Get_Dimensionality(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain)))", "def ndim(self):\n return len(self.nvars)", "def feature_dim(self):\n raise NotImplementedError", "def dimension(self) -> float:\n return self._dimensions", "def ndim(self):\n\n self._check_assigned()\n\n if (\n self.lazy\n and self.transformer is not None\n and hasattr(self.transformer, \"get_transformed_shape\")\n ):\n return len(self.transformer.get_transformed_shape(self.values))\n else:\n return self.__array__().ndim", "def get_ndim(self):\n return self.ndim", "def dim(self):\n if self._classifier is None:\n with self:\n return self._classifier.features_dim\n\n return self._classifier.features_dim", "def __len__(self):\n return self.N.shape[0]", "def get_state_shape(s):\n c = _concat(batch_size, s)\n c_static = _concat(batch_size, s, static=True)\n size = array_ops.zeros(c, dtype=dtype)\n size.set_shape(c_static)\n return size", "def _cell_state_size(self):\n state_sizes = self._cells[0].state_size\n if isinstance(state_sizes, tuple):\n return sum(state_sizes)\n return state_sizes", "def dim(self):\n if '_dim' in self.__dict__:\n return self._dim\n\n if len(self._Vrepresentation)==0:\n self._dim = -1\n return self._dim\n\n origin = vector(self._Vrepresentation[0])\n v_list = [ vector(v)-origin for v in self._Vrepresentation ]\n self._dim = matrix(v_list).rank()\n return self._dim", "def ndim(self):\n return self.X.ndim", "def dim(self):\n raise NotImplementedError", "def dim(self):\n return self.raw_wires.get_dim();", "def dim(self) -> tuple:\n if self.has_tensor(): return self.as_tensor().dim()\n else:\n return tuple(list(self[0].dim()[0]) + [len(self)]), self[0].dim()[1]", "def size(self):\n return self.states.size() * self.n_pop", "def _getdim(x):\n \n if np.ndim(x) > 1:\n \n dim = x.shape[-1]\n \n else:\n \n dim = 0\n \n return dim", "def 
size(self) -> tf.Tensor:", "def get_feature_space_dimensionality(self):\n return self.nextIdPruned" ]
[ "0.8514534", "0.78174734", "0.77158946", "0.77158946", "0.7706377", "0.77037966", "0.7664541", "0.76440984", "0.7634386", "0.7625269", "0.75904065", "0.7558156", "0.75198585", "0.7480286", "0.7480286", "0.7450488", "0.7439861", "0.74317974", "0.73826283", "0.7376509", "0.73712045", "0.7370739", "0.73465514", "0.73365635", "0.7335972", "0.7334126", "0.7321825", "0.7308186", "0.73076713", "0.7290619", "0.7282914", "0.72461504", "0.7243842", "0.7237183", "0.72207016", "0.72107095", "0.7208151", "0.72081155", "0.7201493", "0.7191768", "0.7183766", "0.716048", "0.7143834", "0.7128026", "0.71141374", "0.710001", "0.709678", "0.70957977", "0.708807", "0.7084486", "0.7048741", "0.7048713", "0.70359087", "0.7035587", "0.70224994", "0.70208913", "0.70206594", "0.7001931", "0.6971935", "0.6963771", "0.6961245", "0.6954344", "0.69538075", "0.6949346", "0.6941698", "0.69412374", "0.6939988", "0.69380224", "0.6934311", "0.6934311", "0.6934311", "0.6934311", "0.6934311", "0.6934311", "0.69288534", "0.6927563", "0.69242656", "0.6918362", "0.6914071", "0.6907923", "0.69072217", "0.6896279", "0.68903255", "0.6889704", "0.6888225", "0.6887559", "0.6883262", "0.68828845", "0.68766767", "0.68759376", "0.6871423", "0.68701273", "0.6868932", "0.68688583", "0.68656844", "0.6861184", "0.68490976", "0.68458575", "0.6843749", "0.6843482" ]
0.76861554
6
Returns the max and min values each dimension can take. These are returned as two tuples, `low` and `high`, where each is a list with as many elements as there are dimensions in the state space.
def getStatesBounds(self): return (0, 0), (self._width - 1, self._height - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans", "def get_minmax(self):\n x_minmax = [np.min(self.grid['x']), np.max(self.grid['x'].max())]\n z_minmax = [np.min(self.grid['z']), np.max(self.grid['z'].max())]\n return x_minmax, z_minmax", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def getMinMax(self,arr):\n # not implemented for Template SED yet\n return arr[\"z\"], arr[\"z\"]", "def calculate_min_max_tiles(self):", "def getMinMax(self,arr):\n minz=arr['zmg']-arr['sigma_pz']*5\n dmin=self.zcat-5*self.sigmacat\n minz[np.where(minz>dmin)]=dmin\n maxz=arr['zmg']+arr['sigma_pz']*5\n dax=self.zcat+5*self.sigmacat\n maxz[np.where(maxz<dmax)]=dmax\n return dmin,dmax", "def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))", "def minMax2(x):\r\n\r\n iMinMax = [0 for i in range(2)]\r\n num = len(x)[0]\r\n\r\n iMin = 0\r\n iMax = 0\r\n\r\n #// Search for minimum and maximum in row 0 - linear values:\r\n min = x[0][iMin]\r\n max = x[0][iMax]\r\n\r\n for i in range(1, num):\r\n\r\n if (x[0][i] < min): \r\n min = x[0][i]\r\n iMin = i\r\n \r\n\r\n if (x[0][i] > max): \r\n max = x[0][i]\r\n iMax = i\r\n \r\n iMinMax[0] = iMin\r\n iMinMax[1] = iMax\r\n\r\n return iMinMax", "def min_max(xs):\n return min(xs), max(xs)", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)", "def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]", "def high_and_low(numbers):\n highest = max(numbers)\n lowest = min(numbers)\n return (highest, lowest)", "def get_bounds(self):\n x_max = self.data['x'].max()\n y_max = self.data['y'].max()\n z_max = self.data['z'].max()\n print(\"x={}; y={}; z={}\".format(x_max, y_max, z_max))\n return (x_max, y_max, z_max)", "def get_min_max(ints):\n current_max = None\n current_min = None\n\n if (len(ints) == 0) or (ints is None):\n return tuple([current_min, current_max])\n\n for i, n in enumerate(ints):\n if i == 0:\n current_max = n\n current_min = n\n else:\n if n > current_max:\n current_max = n\n elif n < current_min:\n current_min = n\n\n return tuple([current_min, current_max])", "def high_and_low(numbers):\n highest = max(numbers)\n lowest = min(numbers)\n return (highest,lowest)", "def minmax(xs):\n min_val = None\n max_val = None\n for x in xs:\n if min_val is None or x < min_val:\n min_val = x\n if max_val is None or x > max_val:\n max_val = x\n return (min_val, max_val)", "def state_limits(self) -> Tuple[torch.Tensor, torch.Tensor]:\n # define upper and lower limits based around the nominal equilibrium input\n upper_limit = torch.ones(self.n_dims)\n 
upper_limit[TurtleBot2D.X] = 2.0\n upper_limit[TurtleBot2D.Y] = 2.0\n upper_limit[TurtleBot2D.THETA] = 2 * np.pi\n\n lower_limit = -1.0 * upper_limit\n\n return (upper_limit, lower_limit)", "def get_minmax(self, stmt, slist):\n minel = maxel = None\n for s in slist:\n if s.keyword == \"min-elements\":\n minel = s.arg\n elif s.keyword == \"max-elements\":\n maxel = s.arg\n if minel is None:\n minst = stmt.search_one(\"min_elements\")\n if minst:\n minel = minst.arg\n else:\n minel = \"0\"\n if maxel is None:\n maxst = stmt.search_one(\"max_elements\")\n if maxst:\n maxel = maxst.arg\n return (minel, maxel)", "def _set_min_max_values(self):\n\n p_1, p_2 = self.points[0], self.points[1]\n nb_dim = len(p_1.values)\n self._min_values = []\n self._max_values = []\n for d in range(nb_dim):\n d_min = min(p_1[d], p_2[d])\n d_max = max(p_2[d], p_2[d])\n self._min_values.append(d_min)\n self._max_values.append(d_max)", "def get_bounds(self):\n occupied_locations = self.board.keys()\n min_x = min(p[0] for p in occupied_locations)\n max_x = max(p[0] for p in occupied_locations)\n min_y = min(p[1] for p in occupied_locations)\n max_y = max(p[1] for p in occupied_locations)\n return ((min_x, max_x), (min_y, max_y))", "def bounds(self):\n\n if self.size == 0:\n lo, hi = np.nan, np.nan\n elif self.is_monotonic:\n lo, hi = sorted([self.coordinates[0], self.coordinates[-1]])\n elif self.dtype is np.datetime64:\n lo, hi = np.min(self.coordinates), np.max(self.coordinates)\n else:\n lo, hi = np.nanmin(self.coordinates), np.nanmax(self.coordinates)\n\n return lo, hi", "def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row", "def glGetMinmax( baseFunction, target, reset, format, type, values=None):\r\n if values is None:\r\n width = 2\r\n values = images.images.SetupPixelRead( format, (width,4), type )\r\n arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[\r\n images.images.TYPE_TO_ARRAYTYPE.get(type,type)\r\n ]\r\n baseFunction(\r\n target, reset, format, type,\r\n ctypes.c_void_p( arrayType.dataPointer(values))\r\n )\r\n return values", "def get_min_max(ints):\n\n if len(ints) == 0:\n return (None,None)\n min = ints[0]\n max = ints[0]\n for x in range(1,len(ints)):\n if ints[x] > max:\n max=ints[x]\n elif ints[x] < min:\n min = ints[x]\n return (min,max)", "def get_physical_bounds(dim):\n dim = basename(dim)\n\n if dim == \"coszen\":\n trunc_low = -1.\n trunc_high = 1.\n\n elif dim == \"energy\":\n trunc_low = 0.\n trunc_high = None\n\n elif dim == \"azimuth\":\n trunc_low = 0.\n trunc_high = 2*np.pi\n\n else:\n raise ValueError(\"No physical bounds for dimension '%s' available.\"%dim)\n\n return trunc_low, trunc_high", "def spec_min_max(self):\n spec_max = np.asarray([2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,\n 2048,2048,2048,2048,2048])\n spec_min = np.asarray([ 500, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0])\n return np.vstack((spec_min, spec_max))", "def get_min_max(self) -> tuple:\r\n\r\n minimum = float(\"inf\")\r\n maximum = float(\"-inf\")\r\n\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = 
max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n for name, data in self.trees_data.items():\r\n if self.trees[name][\"point_helper\"] is None:\r\n mapping = self.trees[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n return minimum, maximum", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def get_dimensions(self):\r\n x = []\r\n y = []\r\n z = []\r\n for i in self.verts:\r\n x.append(i[0])\r\n y.append(i[1])\r\n z.append(i[2])\r\n\r\n x.append(abs(min(x)))\r\n y.append(abs(min(y)))\r\n z.append(abs(min(z)))\r\n\r\n return max(x), max(y), max(z)", "def get_extrema(list):\n max_x = max(list,key=lambda item:item[0])[0]\n max_y = max(list,key=lambda item:item[1])[1]\n min_x = min(list,key=lambda item:item[0])[0]\n min_y = min(list,key=lambda item:item[1])[1]\n return (min_x, max_x, min_y, max_y)", "def get_min_max(ints):\n if not ints:\n return None, None\n if len(ints) ==None:\n return None\n min_val = float(\"inf\")\n max_val = -float(\"inf\")\n # for each int in ints if update max_val and min_val accordingly\n for integer in ints:\n if integer > max_val:\n max_val = integer\n\n if integer < min_val:\n min_val = integer\n \n return (min_val, max_val)", "def extents(nodes):\n from numpy import min, max\n return ( min(nodes[:,0]), max(nodes[:,0]),\n min(nodes[:,1]), max(nodes[:,1]),\n min(nodes[:,2]), max(nodes[:,2]) )", "def bounds(lines):\n min_x = bench_util.Max\n min_y = bench_util.Max\n max_x = bench_util.Min\n max_y = bench_util.Min\n \n for line in lines.itervalues():\n for x, y in line:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n \n return ((min_x, min_y), (max_x, max_y))", "def find_min_max(data):\n v = [i[1] for i in data]\n extremes = [min(v), max(v)]\n logging.info('Calculated extremes: %s', extremes)\n return extremes", "def xminmax ( self ) :\n return self.xvar.minmax()", "def min_max(arr, arr_size):\n max_t = arr[0]\n min_t = arr[0]\n for i in range(arr_size):\n if arr[i] > max_t:\n max_t = arr[i]\n if arr[i] < min_t:\n min_t = arr[i]\n return min_t, max_t", "def get_min_max(ints):\n if len(ints) <= 0:\n return ()\n min_value = ints[0]\n max_value = ints[0]\n for i in range(len(ints)):\n temp = ints[i]\n if temp <= min_value:\n min_value = temp\n if temp >= max_value:\n max_value = temp\n output = (min_value, max_value)\n# print(\"output: \", output)\n return output\n pass", "def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin", "def get_min_max(ints):\n if ints is None or len(ints) == 0:\n return None\n \n min_value = ints[0]\n max_value = 
ints[0]\n\n for value in ints:\n if value < min_value:\n min_value = value\n\n if value > max_value:\n max_value = value\n\n return (min_value, max_value)", "def get_min_max(ints):\r\n if len(ints) == 0:\r\n return None\r\n max = ints[0]\r\n min = ints[0]\r\n\r\n for int in ints:\r\n if int < min:\r\n min = int\r\n if int > max:\r\n max = int\r\n \r\n return min, max", "def get_min_max(ints):\n if not ints:\n return\n max = ints[0]\n min = ints[0]\n\n\n for i in ints:\n if i > max:\n max = i\n if i < min:\n min = i\n return (min, max)", "def max_in_range(self, x, y, low, high):\r\n data = np.vstack((x,y)) \r\n y_values = data[1][np.logical_and(low < data[0], data[0] < high)]\r\n x_values = data[0][np.logical_and(low < data[0], data[0] < high)]\r\n index_max_y = y_values.argmax()\r\n max_y = y_values[index_max_y]\r\n max_x = x_values[index_max_y]\r\n return max_x, max_y", "def input_bounds(self):\n return self._min_input, self._max_input", "def minmax(self):\r\n vx = [v[0] for v in self.vl]\r\n vy = [v[1] for v in self.vl]\r\n self.xmax, self.xmin = max(vx), min(vx)\r\n self.ymax, self.ymin = max(vy), min(vy)", "def _xywh2min_max(box):\n x, y, w, h = box\n return np.array([x, y, x+w, y+h])", "def get_min_max(ints):\n if ints == None or len(ints) == 0:\n return None\n if len(ints) == 1:\n return (ints[0],ints[0])\n \n max = ints[0]\n min = ints[0]\n for i in range(1, len(ints)):\n if ints[i] > max:\n max = ints[i]\n if ints[i] < min:\n min = ints[i]\n return (min,max)", "def quickMinMax(self, targetSize=1e6):\n data = self.image\n if targetSize < 2: # keep at least two pixels\n targetSize = 2\n while True:\n h, w = data.shape[:2]\n if h * w <= targetSize: break\n if h > w:\n data = data[::2, ::] # downsample first axis\n else:\n data = data[::, ::2] # downsample second axis\n return self._xp.nanmin(data), self._xp.nanmax(data)", "def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))", "def get_features_min_max(self):\n min_max_list = []\n\n # Get each feature's min and max values.\n for feature_name in self.feature_names:\n min = self.data[feature_name].min()\n max = self.data[feature_name].max()\n min_max_list.append([min, max])\n\n # Create dataframe from list of lists in correct format\n min_max_df = pd.DataFrame(min_max_list)\n min_max = min_max_df.T\n min_max.columns = self.feature_names\n min_max.index = ['min', 'max']\n\n return min_max", "def return_loose_bounds(maxlum=None):\n return[(None,None), (10**-6, None), (2., 350),\n (None, -10**-6), (None, None)]", "def _find_min_and_max_coords(self, block_locations):\n min_x, max_x, min_y, max_y = self.game_size, 0, self.game_size, 0\n for coord in block_locations:\n x = coord[constant.X]\n y = coord[constant.Y]\n if x < min_x:\n min_x = x\n if x > max_x:\n max_x = x\n if y < min_y:\n min_y = y\n if y > max_y:\n max_y = y\n return min_x, max_x, min_y, max_y", "def minmax(data):\n smallest = data[0]\n largest = data[0]\n\n for i in range(0,len(data)):\n if data[i] < smallest:\n smallest = data[i]\n elif data[i] > largest:\n largest = data[i]\n\n return(smallest,largest)", "def min_max(arr: StaticArray) -> ():\n minimum = arr.get(0) # sets min to first element\n maximum = arr.get(0) # sets max to first element\n # iterate over the elements in the array to check for < or >\n for index in range(arr.size()):\n if arr[index] < minimum: # if element is less than the current min, min = new element\n minimum = arr[index]\n elif arr[index] > maximum: # if element is greater than the current max, max 
= new element\n maximum = arr[index]\n return minimum, maximum", "def get_min_max_tuple(min_max_tuple, value):\n min_v, max_v = min_max_tuple\n\n min_v = smart_min(min_v, value)\n max_v = smart_max(max_v, value)\n\n return (min_v, max_v)", "def maxs(self) -> Tensor:\n return self._ranges[:, 1]", "def getminmax_tournament(arr, low, high):\n if low == high:\n return arr[low], arr[low]\n\n if abs(low - high) == 1:\n min_num = None\n max_num = None\n if arr[low] > arr[high]:\n max_num = arr[low]\n min_num = arr[high]\n else:\n max_num = arr[high]\n min_num = arr[low]\n return min_num, max_num\n\n else:\n mid = (low + high) // 2\n min_num_1, max_num_1 = getminmax_tournament(arr, low, mid)\n min_num_2, max_num_2 = getminmax_tournament(arr, mid+1, high)\n return min(min_num_1, min_num_2), max(max_num_1, max_num_2)", "def min_max(items):\n return min(items), max(items)", "def return_parameter_bounds(maximum_luminosity=20):\n return [(maximum_luminosity, maximum_luminosity + 3),\n (3 * 10 ** -4, 8 * 10 ** -3), (2., 350), (-8., -0.2),\n (-400, 400)]", "def minmax(self):\n return self._data_lim[self._n_overlay - 1]", "def get_min_max(ints):\n print(f\"input arr is {ints}\")\n max=0\n min=len(ints)-1\n for i in range(1,len(ints)):\n if ints[i]>ints[max]:\n temp=ints[i]\n ints[i]=ints[max]\n ints[max]=temp\n if ints[i]<ints[min]:\n temp=ints[i]\n ints[i]=ints[min]\n ints[min]=temp\n #print(f\"max value is {ints[max]}\")\n #print(f\"min value is {ints[min]}\")\n return(ints[min],ints[max])", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax", "def getValues(self):\n return [self.scale_min, self.scale_max]", "def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)", "def get_raw_bounds(self) -> [Vector, Vector]:\n\t\tverts = np.array([v.co for mesh in self._meshes for v in mesh.data.vertices])\n\t\tbbox_min = Vector([*np.min(verts, axis=0)])\n\t\tbbox_max = Vector([*np.max(verts, axis=0)])\n\t\treturn bbox_min, bbox_max", "def get_bounds(p_state, idx_image=-1, idx_chain=-1):\n _min = (3*ctypes.c_float)()\n _max = (3*ctypes.c_float)()\n _Get_Bounds(ctypes.c_void_p(p_state), _min, _max,\n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [_min[i] for i in range(3)], [_max[i] for i in range(3)]", "def get_min_max(nums):\n assert(type(nums) == list), \"nums has to be a list\"\n assert(len(nums) > 0), \"get_min_max() arg is an empty sequence\"\n min_ = nums[0]\n max_ = nums[0]\n for n in nums:\n assert(type(n) == int), \"numbers in the list have to be an integer\"\n if n < min_:\n min_ = n\n if n > max_:\n max_ = n\n return (min_, max_)", "def find_max_coords(self):\n all_max_bound = []\n all_min_bound = []\n shape_dict = self.shape_dict\n for zone_id in shape_dict:\n zone_shape = shape_dict[zone_id]\n max_bound_zone = zone_shape.max_bound\n min_bound_zone = zone_shape.min_bound\n all_max_bound.append(max_bound_zone)\n all_min_bound.append(min_bound_zone)\n\n map_max_bound, unused_max = Utils.calculate_boundaries(all_max_bound)\n unused_min, map_min_bound = Utils.calculate_boundaries(all_min_bound)\n\n return (map_max_bound, map_min_bound)", "def find_mode_range(self):\n\n if (len(self.n) < 1): return -1,-1,-1,-1\n nmin = np.nanmin(self.modes['n'])\n nmax = np.nanmax(self.modes['n'])\n lmin = np.nanmin(self.modes['l'])\n lmax = np.nanmax(self.modes['l'])\n return nmin, nmax, lmin, lmax", "def test_min_max_scalar(features: List[List[float]]) -> List[List[float]]:\n min_max_features = []\n transposed_features = [list(i) for i in zip(*features)]\n 
for feature in transposed_features:\n min_in_feature = min(feature)\n max_in_feature = max(feature)\n if min_in_feature == max_in_feature:\n min_max_features.append([0 for x in feature])\n else:\n min_max_features.append([(x - min_in_feature) / (max_in_feature - min_in_feature) for x in feature])\n return [list(i) for i in zip(*min_max_features)]", "def find_minmax(lims, olims):\n\n limzip = zip(list(lims), list(olims), [np.min, np.max])\n return tuple([float(fn([l, ol])) for l, ol, fn in limzip])", "def bounds(self) -> tuple[float, float, float, float]:\n transform = self.transform\n a, b, c, d, e, f, _, _, _ = transform\n if b == d == 0:\n xs = (c, c + a * self.width)\n ys = (f, f + e * self.height)\n else: # rotated\n c0x, c0y = c, f\n c1x, c1y = transform * (0, self.height)\n c2x, c2y = transform * (self.width, self.height)\n c3x, c3y = transform * (self.width, 0)\n xs = (c0x, c1x, c2x, c3x)\n ys = (c0y, c1y, c2y, c3y)\n return min(xs), min(ys), max(xs), max(ys)", "def find_min_max(x):\n if not x: return None\n if len(x) == 1:\n return x[0], x[0] # the first is min and the second is max\n min_val = x[0]\n max_val = x[0]\n for i in xrange(1, len(x)):\n if x[i] < min_val:\n min_val = x[i]\n elif x[i] > max_val:\n max_val = x[i]", "def get_box(self, state, min_x, min_y, max_x, max_y):\n ret_val, min_x.value, min_y.value, max_x.value, max_y.value = self._get_box(state.encode(), min_x.value, min_y.value, max_x.value, max_y.value)\n return ret_val", "def bounds(self, resids: NDArray) -> List[Tuple[float, float]]:", "def find_max_min(number):\n if max(number) == min(number):\n return [len(number)]\n return [min(number), max(number)]", "def get_min_max_x(self, min_x = 1e9, max_x = -1e9, exclude = []): \n \n if self.verbose > 1:\n print(\"MultiLinearSpectra.get_min_max_x()\") \n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n min_x, max_x = self.mess[m][\"object\"].get_min_max_x(min_x, max_x)\n \n return min_x, max_x", "def getLimits(self):\n lims = [x * self.getSign() + self.getOffset() for x in (self.connection.getChannel(self.chanNamePrefix % 'low_limit').read(), \\\n self.connection.getChannel(self.chanNamePrefix % 'high_limit').read())]\n return (min(lims), max(lims))", "def get_minx_maxx(self, normalized=True):\n minx = np.array([[0.0] * len(self.encoded_feature_names)])\n maxx = np.array([[1.0] * len(self.encoded_feature_names)])\n\n for idx, feature_name in enumerate(self.continuous_feature_names):\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n\n if normalized:\n minx[0][idx] = (self.permitted_range[feature_name]\n [0] - min_value) / (max_value - min_value)\n maxx[0][idx] = (self.permitted_range[feature_name]\n [1] - min_value) / (max_value - min_value)\n else:\n minx[0][idx] = self.permitted_range[feature_name][0]\n maxx[0][idx] = self.permitted_range[feature_name][1]\n return minx, maxx", "def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds", "def min_max(input):\r\n return tuple(sorted(input)[:1]+sorted(input)[-1:]) # write a line of code to return containing min and max\r\n #tuple(input[:1]+input[-1:]) --------------- it works for 
the sorted lists\r\n #tuple(sorted(input)[:1]+sorted(input)[-1:]) ---------------it works for any input---slicing lists and concatinating\r\n #tuple(sorted(input))[:1]+tuple(sorted(input))[-1:]----------------it works same as the above----slicing tuples and concatinating them\r", "def Extrema(self):\n ymin = np.min(self._corners[:, 1])\n xmin = np.min(self._corners[:, 0])\n ymax = np.max(self._corners[:, 1])\n xmax = np.max(self._corners[:, 0])\n return ymin, xmin, ymax, xmax", "def _get_colorbar_limits(self):\n if self.boundaries is not None:\n C = self.boundaries\n if self.extend in [\"min\", \"both\"]:\n C = C[1:]\n\n if self.extend in [\"max\", \"both\"]:\n C = C[:-1]\n return min(C), max(C)\n else:\n return self.get_clim()", "def extremePointsTup(cnt):\n\tleftmost = tuple(cnt[cnt[:,:,0].argmin()][0])\n\trightmost = tuple(cnt[cnt[:,:,0].argmax()][0])\n\ttopmost = tuple(cnt[cnt[:,:,1].argmin()][0])\n\tbottommost = tuple(cnt[cnt[:,:,1].argmax()][0])\n\treturn (leftmost, rightmost, topmost, bottommost)", "def GetFieldMinMax(fielddef):\n minmax = {'c': (0, 0xff),\n '?': (0, 1),\n 'b': (~0x7f, 0x7f),\n 'B': (0, 0xff),\n 'h': (~0x7fff, 0x7fff),\n 'H': (0, 0xffff),\n 'i': (~0x7fffffff, 0x7fffffff),\n 'I': (0, 0xffffffff),\n 'l': (~0x7fffffff, 0x7fffffff),\n 'L': (0, 0xffffffff),\n 'q': (~0x7fffffffffffffff, 0x7fffffffffffffff),\n 'Q': (0, 0x7fffffffffffffff),\n 'f': (sys.float_info.min, sys.float_info.max),\n 'd': (sys.float_info.min, sys.float_info.max),\n }\n format_ = GetFieldDef(fielddef, fields='format_')\n min_ = 0\n max_ = 0\n\n if format_[-1:] in minmax:\n min_, max_ = minmax[format_[-1:]]\n max_ *= GetFormatCount(format_)\n elif format_[-1:].lower() in ['s','p']:\n # s and p may have a prefix as length\n max_ = GetFormatCount(format_)\n\n return min_,max_", "def getminmax_linear_search(arr):\n if len(arr) == 0:\n return None, None\n\n if len(arr) == 1:\n return arr[0], arr[0]\n\n min_num = None\n max_num = None\n if arr[0] > arr[1]:\n max_num = arr[0]\n min_num = arr[1]\n else:\n max_num = arr[1]\n min_num = arr[0]\n\n for idx in range(2, len(arr)):\n if min_num > arr[idx]:\n min_num = arr[idx]\n if max_num < arr[idx]:\n max_num = arr[idx]\n\n return min_num, max_num", "def zminmax ( self ) :\n return self.zvar.minmax()", "def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]", "def hi_lo(data_series, current_max, current_min):\n \n try:\n highest = numpy.max(data_series)\n except:\n highest = max(data_series)\n \n if highest > current_max:\n new_max = highest\n else:\n new_max = current_max\n \n try: \n lowest = numpy.min(data_series)\n except:\n lowest = min(data_series)\n \n if lowest < current_min:\n new_min = lowest\n else:\n new_min = current_min\n \n return new_max, new_min", "def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()", "def get_min_max(self):\n\n mr = np.sqrt(2 * np.log(1/self.mth)) * self.ms\n mr[:] = np.max(mr)\n\n mxmin = self.mx - mr\n mxmax = self.mx + mr\n mymin = self.my - mr\n mymax = self.my + mr\n mzmin = self.mz - mr\n mzmax = self.mz + mr\n\n mb_xmin_idx = np.argmin(mxmin[self.ma > 0])\n mb_xmax_idx = np.argmax(mxmax[self.ma > 0])\n mb_ymin_idx = np.argmin(mymin[self.ma > 0])\n mb_ymax_idx = np.argmax(mymax[self.ma > 0])\n mb_zmin_idx = np.argmin(mzmin[self.ma > 0])\n mb_zmax_idx = np.argmax(mzmax[self.ma > 0])\n\n xmin0 = self.mx[mb_xmin_idx] - mr[mb_xmin_idx]\n xmax0 = self.mx[mb_xmax_idx] + mr[mb_xmax_idx]\n ymin0 = self.my[mb_ymin_idx] - 
mr[mb_ymin_idx]\n ymax0 = self.my[mb_ymax_idx] + mr[mb_ymax_idx]\n zmin0 = self.mz[mb_zmin_idx] - mr[mb_zmin_idx]\n zmax0 = self.mz[mb_zmax_idx] + mr[mb_zmax_idx]\n\n xmin = xmin0 - (xmax0 - xmin0) * 0.25\n xmax = xmax0 + (xmax0 - xmin0) * 0.25\n ymin = ymin0 - (ymax0 - ymin0) * 0.25\n ymax = ymax0 + (ymax0 - ymin0) * 0.25\n zmin = zmin0 - (zmax0 - zmin0) * 0.25\n zmax = zmax0 + (zmax0 - zmin0) * 0.25\n\n return xmin, xmax, ymin, ymax, zmin, zmax", "def getColMinMax(table, col):\n\tvmin = None\n\tvmax = None\n\tfor rec in table:\n\t\tvalue = rec[col]\n\t\tif vmin is None:\n\t\t\tvmin = value\n\t\t\tvmax = value\n\t\telse:\n\t\t\tif value < vmin:\n\t\t\t\tvmin = value\n\t\t\telif value > vmax:\n\t\t\t\tvmax = value\n\treturn (vmin, vmax, vmax - vmin)", "def compare_select(self, x: torch.Tensor) -> [torch.Tensor, torch.LongTensor]:\n reshaped_x = x.reshape(-1, self.n_states, 2)\n max_values, absolute_max_ind = torch.max(reshaped_x, 2)\n return max_values, absolute_max_ind", "def get_lims(data):\n return data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1", "def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max", "def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] > right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)", "def nudged_min_max_compute(min_broadcast, max_broadcast, num_bits, narrow_range):\n\n\n dtype = min_broadcast.dtype\n quant_min = 1 if narrow_range else 0\n quant_max = (2 ** num_bits) - 1\n\n # because of need compute each channel, so quant_min and quant_max need to broadcast.\n quant_min_float = topi.full(min_broadcast.shape, dtype, tvm.const(quant_min, dtype))\n quant_max_float = topi.full(min_broadcast.shape, dtype, tvm.const(quant_max, dtype))\n\n # caculate each channel max and min difference.\n max_sub_min = topi.subtract(max_broadcast, min_broadcast)\n quant_max_sub_quant_min = topi.subtract(quant_max_float, quant_min_float)\n # compute scale = (max_broadcast - min_broadcast) / (quant_max - quant_min)\n # and min_div_scale = min_broadcast / scale\n if product_is_mini():\n scale = mul(max_sub_min, reciprocal(quant_max_sub_quant_min), target=utils.CCE)\n min_div_scale = Mul(min_broadcast, reciprocal(scale), target=utils.CCE)\n else:\n scale = divide(max_sub_min, quant_max_sub_quant_min, target=utils.CCE)\n min_div_scale = divide(min_broadcast, scale, target=utils.CCE)\n\n # zero_point_from_min = quant_min_float - min_broadcast / scale\n zero_point_from_min = topi.subtract(quant_min_float, min_div_scale)\n # if zero_point_from_min < quant_min_float, bool_less_quant_min_float = 1 else 0\n bool_less_quant_min_float = 
less_compare_float32(zero_point_from_min, quant_min_float)\n # if quant_max_float < zero_point_from_min, bool_more_quant_max_float = 1 else 0\n bool_more_quant_max_float = less_compare_float32(quant_max_float, zero_point_from_min)\n\n # according to above bool param to select effective value\n less_quant_min_float = topi.multiply(quant_min_float, bool_less_quant_min_float)\n more_quant_max_float = topi.multiply(quant_max_float, bool_more_quant_max_float)\n\n # compute which num is not less than quant_min_float and not large than quant_max_float\n tensor_one = topi.full(min_broadcast.shape, dtype, dc.one_const(dtype))\n bool_not_less_quant_min_float = topi.subtract(tensor_one, bool_less_quant_min_float)\n bool_not_more_quant_max_float = topi.subtract(tensor_one, bool_more_quant_max_float)\n bool_between_min_max = topi.multiply(bool_not_less_quant_min_float, bool_not_more_quant_max_float)\n between_min_max_float = topi.multiply(zero_point_from_min, bool_between_min_max)\n # add 0.5 to num which min <= num <= max and then floor them.\n between_min_max_add_half_one = topi.add(between_min_max_float, dc.half_const(dtype))\n between_min_max_round = akg.lang.ascend.floor(between_min_max_add_half_one)\n if product_is_mini():\n between_min_max_round = topi.cast(between_min_max_round, \"float16\")\n\n between_min_max_round = topi.cast(between_min_max_round, \"float32\")\n\n # calculate the maximum and minimum values of the quantization\n nudged_zero_point_tmp = topi.add(less_quant_min_float, more_quant_max_float)\n nudged_zero_point = topi.add(nudged_zero_point_tmp, between_min_max_round)\n\n nudged_min_tmp = topi.subtract(quant_min_float, nudged_zero_point)\n nudged_max_tmp = topi.subtract(quant_max_float, nudged_zero_point)\n nudged_min = topi.multiply(nudged_min_tmp, scale)\n nudged_max = topi.multiply(nudged_max_tmp, scale)\n res = [nudged_min, nudged_max, scale]\n\n return res", "def get_max_min(block_size):\r\n return (int(block_size / 2), int((block_size - 1) / 2))", "def get_min_max(ints):\n if len(ints) == 0:\n return None, None\n \n min_e = ints[0]\n max_e = ints[-1]\n for e in ints:\n if isinstance(e, int) == False: # if the list includes non-integer number, do not find min, max \n return None,None\n if e < min_e:\n min_e = e\n if e > max_e:\n max_e = e\n return min_e, max_e", "def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value", "def range_(self):\n return tuple((e[0], e[-1]) for e in self.edges)" ]
[ "0.72885597", "0.6820047", "0.6709689", "0.6641923", "0.661468", "0.648229", "0.64780694", "0.6453788", "0.64332044", "0.6412223", "0.6407123", "0.63986015", "0.63950133", "0.63915455", "0.6389562", "0.63883317", "0.6370339", "0.63583744", "0.6325942", "0.629759", "0.62925905", "0.62804073", "0.62690043", "0.6263546", "0.6241651", "0.62414294", "0.6240135", "0.6239035", "0.62382615", "0.6236971", "0.6222978", "0.62102914", "0.6204816", "0.6203671", "0.6198669", "0.6191928", "0.6191234", "0.6190591", "0.6182788", "0.6174643", "0.6171332", "0.61711943", "0.6160561", "0.6158828", "0.6157247", "0.6144806", "0.6143228", "0.6142735", "0.61409426", "0.61353767", "0.6134546", "0.61343503", "0.61335695", "0.6129534", "0.6114617", "0.61081773", "0.6097147", "0.6096194", "0.6095629", "0.60932475", "0.6084535", "0.6075995", "0.6067781", "0.6066338", "0.6048437", "0.6034888", "0.60329545", "0.60235894", "0.59961635", "0.5992842", "0.59924823", "0.5973669", "0.5957575", "0.5947248", "0.594655", "0.59382915", "0.5924997", "0.5913877", "0.5894166", "0.58914983", "0.5883603", "0.58807325", "0.5880488", "0.58769315", "0.5868057", "0.5867279", "0.5854983", "0.5848413", "0.5845814", "0.58454543", "0.5842041", "0.58377486", "0.58346266", "0.58188796", "0.5811431", "0.58110946", "0.5810921", "0.58085555", "0.58084714", "0.58082694" ]
0.58876115
80
The agent takes the given action and receives back the new state, the reward, whether the episode is terminated, and some nothingness (an empty info dict).
def step(self, action): x, y = self._move(action, *self._currentPos) if chr(self._grid[x, y]) == CASE_TYPES.Wall: # error - previous state was already a wall self._done = True self._trajectory.append(self._currentPos) return self._currentPos, -1, self._done, {} reward = { CASE_TYPES.Water: self.waterReward, CASE_TYPES.Sand: self.sandReward, CASE_TYPES.Open: self.stepReward, CASE_TYPES.Termination: self.successReward, CASE_TYPES.Trap: ( -(self.maxSteps - len(self._trajectory)) + self.failureReward + self.trapReward) }[chr(self._grid[x, y])] # termination state if chr(self._grid[x, y]) in [CASE_TYPES.Termination, CASE_TYPES.Trap]: self._done = True self._currentPos = (x, y) self._trajectory.append(self._currentPos) self._nbSteps += 1 if self._nbSteps >= self.maxSteps and not self._done: reward += self.failureReward return self._currentPos, reward, self._done, {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agent_step(self, reward, state):\n self.sum_rewards += reward\n self.episode_steps += 1\n\n # Make state an array of shape (1, state_dim) to add a batch dimension and\n # to later match the get_action_values() and get_TD_update() functions\n state = np.array(state)\n\n # Select action\n action = self.policy(state)\n \n # Append new experience to replay buffer\n self.replay_buffer.append(self.last_state, self.last_action, reward, 0, state)\n \n # Perform replay steps:\n if self.replay_buffer.size() > self.replay_buffer.minibatch_size:\n self.network_target.load_state_dict(self.network.state_dict())\n for _ in range(self.num_replay):\n # Get sample experiences from the replay buffer\n experiences = self.replay_buffer.sample() \n self.optimize_network(experiences)\n \n # Update the last state and last action.\n self.last_state = state\n self.last_action = action\n \n return action", "def step(self, action):\n\n ob = self.sendCmd(self.url, self.actions_list[action])\n print(\"ob -> {}\".format(ob['obsequium']))\n\n self.obsequium = int(ob[\"obsequium\"])\n self.bonus = int(ob[\"bonus\"])\n\n # we need to check is make sense finish it\n if self.is_game_done:\n raise RuntimeError(\"Episode is done\")\n self.curr_step += 1\n self._take_action(action)\n\n reward = self._get_reward()\n\n # TODO: revisar ob = self._get_state()\n\n return ob, reward, self.is_game_done, {}", "def step(self, action):\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {\n \"play\": {\n \"landlord\": self._get_reward(\"landlord\"),\n \"landlord_up\": self._get_reward(\"landlord_up\"),\n \"landlord_down\": self._get_reward(\"landlord_down\")\n },\n \"bid\": {\n \"landlord\": self._get_reward_bidding(\"landlord\")*2,\n \"landlord_up\": self._get_reward_bidding(\"landlord_up\"),\n \"landlord_down\": self._get_reward_bidding(\"landlord_down\")\n }\n }\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}", "def step(self, action: ActionType) -> EnvResponse:\n action = self.action_space.clip_action_to_space(action)\n if self.action_space and not self.action_space.contains(action):\n raise ValueError(\"The given action does not match the action space definition. 
\"\n \"Action = {}, action space definition = {}\".format(action, self.action_space))\n\n # store the last agent action done and allow passing None actions to repeat the previously done action\n if action is None:\n action = self.last_action\n self.last_action = action\n if self.visualization_parameters.add_rendered_image_to_env_response:\n current_rendered_image = self.get_rendered_image()\n\n self.current_episode_steps_counter += 1\n if self.phase != RunPhase.UNDEFINED:\n self.total_steps_counter += 1\n\n # act\n self._take_action(action)\n\n # observe\n self._update_state()\n\n if self.is_rendered:\n self.render()\n\n self.total_reward_in_current_episode += self.reward\n\n if self.visualization_parameters.add_rendered_image_to_env_response:\n self.info['image'] = current_rendered_image\n\n self.last_env_response = \\\n EnvResponse(\n reward=self.reward,\n next_state=self.state,\n goal=self.goal,\n game_over=self.done,\n info=self.info\n )\n\n # store observations for video / gif dumping\n if self.should_dump_video_of_the_current_episode(episode_terminated=False) and \\\n (self.visualization_parameters.dump_mp4 or self.visualization_parameters.dump_gifs):\n self.last_episode_images.append(self.get_rendered_image())\n\n return self.last_env_response", "def step(self, action):\n \n success = False\n self.curr_step += 1\n self._take_action(action)\n self._take_action(action)\n self._take_action(action)\n\n # initialize reward and get state \n reward = 0.0\n ob = self._get_state()\n\n # give dense rewards \n if not self.sparse_reward:\n reward = self._get_reward()\n\n # bad terminal conditions\n if self.curr_step >= self.max_steps \\\n or self.target_distance >= self.max_distance \\\n or self.mean_radius_sheep >= self.max_radius:\n self.finish = True\n if self.sparse_reward:\n reward = -1.0\n\n # good terminal conditions\n if self.target_distance <= 1.0:\n success = True\n self.finish = True\n if self.sparse_reward:\n reward = 1.0\n\n # update rl parameters\n self.episode_length += 1\n self.episode_reward += reward\n\n # generate info return parameter\n if self.info_mode == 1 and self.finish:\n info = {'r':self.episode_reward, 'l':self.episode_length, \n 's': success}\n else:\n info = {'n':self.num_sheep, 's': success}\n\n return ob, reward, self.finish, info", "def step(self, action):\r\n\r\n max_time = len(self.episode) - 1 # the number of states available in the episode , -1 for indexing purpose\r\n state = self.episode[self.t] # state contains LT, ST indicator, closing price\r\n closing_price = state['Close']\r\n transaction_fee = self.transaction_fee\r\n agent = self.agent\r\n reward = 0\r\n \r\n if self.t < max_time-1:\r\n if agent.stock == 0: # if there is no stock position the agent can buy or do nothing\r\n if action == 1: # buy sstock at closing price\r\n agent.stock_position = agent.starting_cash / closing_price * (1 - transaction_fee)\r\n agent.cash = 0\r\n agent.stock = 1\r\n reward = 0\r\n elif action == 0: # no action\r\n reward = 0\r\n else:\r\n reward = -10000\r\n \r\n elif agent.stock == 1: # if there is a stock position the agent can sell or do nothing\r\n if action == 2: # sell stock at closing price\r\n agent.cash = agent.stock_position * closing_price * (1 - transaction_fee)\r\n reward = agent.cash - agent.starting_cash\r\n self.done = True\r\n elif action == 0: # no action\r\n reward = 0\r\n elif action == 1:\r\n reward = -10000\r\n \r\n elif self.t == max_time-1:\r\n if agent.stock == 1:\r\n # stock position is forced to liquidate\r\n agent.cash = agent.stock_position 
* closing_price * (1 - transaction_fee)\r\n reward = agent.cash - agent.starting_cash\r\n self.done = True\r\n \r\n else:\r\n reward = 0\r\n self.done = True \r\n \r\n next_state = (self.episode[self.t+1]['ST Relative Indicator'], \r\n self.episode[self.t+1]['ST Relative Indicator'],\r\n agent.stock,\r\n self.t+1)\r\n self.t += 1\r\n \r\n return next_state, reward, self.done", "def step(self, action):\n # check if suggested action is valid\n valid = self._take_action(action)\n if not valid:\n _, _ = self._simulate()\n response = self.worst_response\n target = 6*60\n else:\n # simulate until a TS response is needed\n response = np.inf\n while response == np.inf:\n response, target = self._simulate()\n if np.isnan(target): # prio 2 or 3 incident: no target exists\n target = response\n\n self.last_action = action if self.action_type == \"tuple\" else self.action_num_to_tuple[action]\n # calculate reward and new state\n self.reward = self._get_reward(response, target, valid=valid)\n self.state, self.is_done = self._extract_state()\n return self.state, self.reward, self.is_done, {\"note\": \"nothing to report\"}", "def _step(self, action):\n\n reward = 0.0\n x, y = action\n\n if not Creator.add_edge(self.nxgraph, x+1, y+1):\n reward = 0.0\n # TODO: do we return here?\n raise NotImplementedError\n else:\n reward = 1.0\n new_state = EnvTools.get_state(self.nxgraph)\n EnvTools.calculate_reward(self.state, self.previous_state)\n raise NotImplementedError\n\n\n\n pass", "def step(self, action):\n obs = self.gym.get_observations()\n all_actions = self.gym.act(obs)\n all_actions.insert(self.gym.training_agent, action)\n state, reward, terminal, info = self.gym.step(all_actions)\n agent_state = self.featurize(state[self.gym.training_agent])\n\n # agent_state_history = self.make_observation(agent_state, self.step)\n agent_reward = reward[self.gym.training_agent]\n\n # self.step += 1\n return agent_state, agent_reward, terminal, info", "def step(self, action):\n\n action[1] = 0 if action[1] < 0 else 1\n\n if not self.moving:\n self.agent_host.sendCommand(\"move 0.5\")\n time.sleep(.2)\n self.moving = True\n\n # Get Action\n command = \"strafe \" + str(action[0])\n if ((action[0] < 0 and self.allow_left) or (action[0] > 0 and self.allow_right)):\n self.agent_host.sendCommand(command)\n time.sleep(.2)\n self.agent_host.sendCommand(\"strafe 0\")\n time.sleep(.1)\n\n if action[1]:\n if self.checkCommand:\n self.jumpsOverDitches += 1\n self.checkCommand = False\n self.agent_host.sendCommand(\"jump 1\")\n time.sleep(.2)\n self.agent_host.sendCommand(\"jump 0\")\n\n # if (command == \"crouch 1\"):\n # self.agent_host.sendCommand(command)\n # time.sleep(.3)\n # self.agent_host.sendCommand(\"crouch 0\")\n # time.sleep(.2)\n\n self.episode_step += 1\n\n # Get Observation\n world_state = self.agent_host.getWorldState()\n for error in world_state.errors:\n print(\"Error:\", error.text)\n self.obs, self.allow_left, self.allow_right, curZPos, curXPos = self.get_observation(world_state)\n if curZPos:\n self.curZPos = curZPos\n if curXPos:\n if self.obs[3 + int(curXPos)]:\n self.checkCommand = True\n self.numDitchesEncountered += 1\n\n # Get Done\n done = not world_state.is_mission_running \n\n # Get Reward\n reward = 0\n for r in world_state.rewards:\n reward += r.getValue()\n self.episode_return += reward\n\n return self.obs, reward, done, dict()", "def step(self, action):\n done = self.cur_step >= self.max_steps_per_episode\n\n if done:\n raise RuntimeError(\"Episode is done\")\n\n self.cur_step += 1\n\n # Compute 
new state based on previous state and action\n new_state = self._take_action(action)\n\n # Compute reward value based on previous state and action\n reward = self._get_reward(action)\n\n # Update current state to new state\n self.cur_state = new_state\n\n # Compute observation from current state\n ob = self._get_obs() # Has to come after new state update\n\n # Update action, observation and reward histories\n self.action_episode_memory[self.cur_episode].append(action)\n self.observation_episode_memory[self.cur_episode].append(ob)\n self.reward_episode_memory[self.cur_episode].append(reward)\n\n # Recompute done since action may have modified it\n done = self.cur_step >= self.max_steps_per_episode\n\n return ob, reward, done, {}", "def step(self, action):\r\n reward = self.__execute(action)\r\n self.__totalReward += reward\r\n status = self.__status()\r\n state = self.__observe()\r\n logging.debug(\"action: {:10s} | reward: {: .2f} | status: {}\".format(Action(action).name, reward, status))\r\n return state, reward, status", "def _step(self, action):\n if self._episode_ended:\n # The last action ended the episode. Ignore the current action and start a new episode\n return self.reset()\n\n env_action = self.y_train[self.id[self.episode_step]]\n self.episode_step += 1\n\n if action == env_action: # Correct action\n if env_action: # Minority\n reward = 1 # True Positive\n else: # Majority\n reward = self.imb_rate # True Negative\n\n else: # Incorrect action\n if env_action: # Minority\n reward = -1 # False Negative\n self._episode_ended = True # Stop episode when minority class is misclassified\n else: # Majority\n reward = -self.imb_rate # False Positive\n\n if self.episode_step == self.X_len - 1: # If last step in data\n self._episode_ended = True\n\n self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint\n\n if self._episode_ended:\n return ts.termination(self._state, reward)\n else:\n return ts.transition(self._state, reward)", "def step(self, action):\n self.t += 1\n if self.use_run_time_assurance:\n probe_state, unsafe = self.probe_step(action)\n # switch to safe controller if unsafe\n if unsafe:\n x, x_dot, theta, theta_dot = probe_state\n # go right\n if x <= -self.x_threshold: # go right\n action = 1\n elif x>= self.x_threshold: # go left\n action = 0 \n \n state, reward, done, info = self.env.step(action)\n # Could make a custom reward here if you want\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info", "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the observations result of performing that action.\n \"\"\"\n #if self.step_number > 200:\n #self.reset()\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n self.gazebo.unpauseSim()\n self._set_action(action)\n #self._prey_step()\n self.gazebo.pauseSim()\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n \n self.cumulated_episode_reward = self.cumulated_episode_reward+ reward\n self.step_number += 1\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, info", "def _single_agent_step(self, action):\n reward = 0.0\n done = False\n self.timestep += 1\n state, player_id = self.game.step(action)\n while not self.game.is_over() and not 
player_id == self.active_player:\n self.timestep += 1\n action, _ = self.model.agents[player_id].eval_step(\n self._extract_state(state)\n )\n if not self.model.agents[player_id].use_raw:\n action = self._decode_action(action)\n state, player_id = self.game.step(action)\n\n if self.game.is_over():\n reward = self.get_payoffs()[self.active_player]\n done = True\n state = self.reset()\n return state, reward, done\n\n return self._extract_state(state), reward, done", "def step(self, action, skiprate=1):\n reward = self.game.make_action(action, skiprate)\n next_state = self.game.get_state()\n game_over = self.game.is_episode_finished()\n return next_state, reward, game_over", "def act(self, state):\n # Append the state to the short term memory (ie. History)\n self._history.append(state)\n\n # If policy requires agent to explore, sample random action\n if self._explorer.is_exploring(self._num_actions_taken):\n action = self._explorer(self.nb_actions)\n else:\n # Use the network to output the best action\n env_with_history = self._history.value\n q_values = self._action_value_net.eval(\n # Append batch axis with only one sample to evaluate\n env_with_history.reshape((1,) + env_with_history.shape)\n )\n\n self._episode_q_means.append(np.mean(q_values))\n self._episode_q_stddev.append(np.std(q_values))\n\n # Return the value maximizing the expected reward\n action = q_values.argmax()\n\n # Keep track of interval action counter\n self._num_actions_taken += 1\n return action", "def run_one_step(self):\n # Get the current state, action and initialise the reward\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n # Check if the environment has reached a terminal state\n if self.env.check_terminal() is False:\n # Save the initial state and action to an 'experience'\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n # Update the environment using the chosne action\n self.env.update(action)\n # Get the reward to attribute to the agent and save to the experience to save\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n # Get the updated state\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n # If the new state isn't terminal, save the next action and the 'done' flag to the experience\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n # If the new state is terminal, save a dummy action and the 'done' flag to the experience\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n # Update the history with the latest experience\n self.agent.update_history(copy.copy(latest_experience))\n # Update the agents policy using a batch of experiences chosen from the history\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count += 1\n # Update the target network if appropriate\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.agent.policy.learner.update_target_network()\n else:\n # If the environment is in a terminal state, record this and perform a policy update\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count = 
0\n return reward", "def step(self,action):\n observation, reward, done, info = self.env.step(action)\n if info[\"health\"] <= 0 or info[\"enemy_health\"] <= 0:\n self.player_hp = 120\n self.enemy_hp = 120\n reward = 0\n else:\n self.player_hp = info['health']\n self.enemy_hp = info[\"enemy_health\"]\n reward = self.player_hp - self.enemy_hp\n\n\n if info[\"enemy_rounds_won\"] == 2 or info[\"rounds_won\"] == 2:\n self.player_hp = 120\n self.enemy_hp = 120\n reward = 0\n done = True\n\n obs = self.observation(observation)\n if self.current_frame_number == self.frame_skipping:\n self.q.append(obs)\n self.current_frame_number = 0 \n self.current_frame_number += 1\n reward = reward / 120 +1\n return np.array(list(self.q)), reward, done, info", "def agent_step(self, reward, observation):\n\n self.step_counter += 1\n return_action = Action()\n\n current_image, raw_image = self.preprocess_observation(observation.intArray)\n\n # if self.step_counter % 100 == 0:\n # plt.imshow(current_image)\n # plt.colorbar()\n # plt.show()\n # time.sleep(0.4)\n\n #TESTING---------------------------\n if raw_image is not None:\n self.episode_images.append(raw_image)\n self.episode_reward += reward\n int_action, max_q = self.choose_action(self.test_data_set, self.testing_epsilon,\n current_image, np.clip(reward, -1, 1))\n if max_q is not None:\n self.epoch_considered_steps += 1\n\n if self.pause > 0:\n time.sleep(self.pause)\n\n # Map it back to ALE's actions\n return_action.intArray = [int_action]\n\n self.last_action = int_action\n self.last_image = current_image\n\n return return_action", "def step(self, action):\n assert self.action_space.contains(\n action), \"%r (%s) invalid\" % (action, type(action))\n self.time_step += 1\n reward = float(0)\n self.is_episode_done = False\n\n # For testing code\n current_edge_agg_num = self.time_step\n\n # Rescale the action from [-1, 1] to [1, 2, ... , 9]\n # The action is the number of aggregations on edge servers\n # current_edge_agg_num = int((action + 2) * (action + 2))\n\n logging.info(\"RL Agent: Start time step #%s...\", self.time_step)\n logging.info(\n \"Each edge server will run %s rounds of local aggregation.\",\n current_edge_agg_num)\n\n # Pass the tuned parameter to RL agent\n self.rl_agent.get_tuned_para(current_edge_agg_num, self.time_step)\n\n # Wait for state\n current_loop = asyncio.get_event_loop()\n get_state_task = current_loop.create_task(self.wait_for_state())\n current_loop.run_until_complete(get_state_task)\n #print('State:', self.state)\n\n self.normalize_state()\n #print('Normalized state:', self.state)\n\n reward = self.get_reward()\n info = {}\n\n self.rl_agent.cumulative_reward += reward\n\n # Signal the RL agent to start next time step (next round of FL)\n self.step_done.set()\n\n return np.array([self.state]), reward, self.is_episode_done, info", "def step(self, action):\n\n if not self._is_action_legal(action):\n return self.current_state, self.reward_illegal_action, self._is_terminal_state(), None\n else:\n # Change action passed if environment should behave random\n if self.stochastic:\n if not np.random.choice([True, False], 1, p=[self.p, 1 - self.p]):\n action = np.random.choice(self.possible_actions)\n\n # Needed for reward calculation (must be done before updating data structures)\n number_of_shifts = self._get_number_of_shifts(action)\n is_cargo_mandatory = int(self.vehicle_data[2][action] == 1)\n\n slot = self.end_of_lanes[self.current_Lane]\n self.loading_sequence += \"{}. 
Load Vehicle Type \\t {} \\t in Lane: \\t {} \\t Row: \\t {} \\n\" \\\n .format(self.sequence_no, action, self.current_Lane, slot)\n\n self.end_of_lanes[self.current_Lane] += self.vehicle_data[4][action]\n\n if self.vehicle_data[1][action] == -1 or \\\n self.number_of_vehicles_loaded[action] < self.vehicle_data[1][action]:\n self.number_of_vehicles_loaded[action] += 1\n\n self.loaded_vehicles[self.current_Lane][self.vehicle_Counter[self.current_Lane]] = action\n self.vehicle_Counter[self.current_Lane] += 1\n\n # Update grids\n for i in range(self.vehicle_data[4][action]):\n self.grid.T[self.current_Lane][slot + i] = self.sequence_no\n self.grid_destination.T[self.current_Lane][slot + i] = self.vehicle_data[3][action]\n self.grid_vehicle_type.T[self.current_Lane][slot + i] = self.vehicle_data[0][action]\n\n # Update lowest destination data structure\n if self.vehicle_data[3][action] < self.lowest_destination[self.current_Lane]:\n self.lowest_destination[self.current_Lane] = self.vehicle_data[3][action]\n\n self.sequence_no += 1\n # Update according to lane selection heuristic\n self.current_Lane = self._get_minimal_lanes()[0]\n\n self.possible_actions = self.get_possible_actions_of_state()\n self.current_state = self._get_current_state()\n\n if self._is_terminal_state():\n # Calculate reward for terminal state\n free_spaces = np.sum(self._get_free_capacity()) / np.sum(self.total_capacity)\n mandatory_vehicles_left_to_load = np.sum(self.vehicle_data[1][self.mandatory_cargo_mask]\n - self.number_of_vehicles_loaded[self.mandatory_cargo_mask])\n reward_features = np.array(\n [is_cargo_mandatory, number_of_shifts, free_spaces, mandatory_vehicles_left_to_load])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, True, {}\n else:\n # Calculate reward\n reward_features = np.array([is_cargo_mandatory, number_of_shifts, 0, 0])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, False, {}", "def step(self, action):\n # if self.current_turn<self.MAX_TURNS-1:\n # self.current_turn += 1\n \n\n self.current_turn += 1\n system_action = self.parseAction(action)\n \n # Used for logging and evaluation\n self.updateMetaState(system_action)\n\n self.processSystemAction(system_action)\n\n reward = self.calculateReward()\n\n user_action = self.user.respond(system_action)\n self.processUserAction(user_action)\n observation = self.generateObservation()\n done = self.isDone()\n if done:\n info = { \"successful\": self.user.goals[\"satisfied\"], \n \"first-appearance\": self.first_appearance, \n \"turn-penalty\": self.current_turn,\n \"sugg-all-penalty\":self.sugg_penalty,\n \"info-all-penalty\": self.info_penalty,\n \"eli-kw-used\": self.eli_kw_observed,\n \"eli-query-used\": self.eli_query_observed,\n }\n else:\n info = {}\n if self.training:\n if done and self.user.goals[\"satisfied\"]: reward+=30\n return observation, reward, done, info", "def step(self, action):\n self.t += 1\n state, reward, done, info = self.env.step(action)\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info", "def step(self, reward, observation):\n self._last_observation = self._observation\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._store_transition(self._last_observation, self.action, reward, 
False)\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn)\n self.action = onp.asarray(self.action)\n return self.action", "def act(self, action:Action) -> None:\r\n if self.terminated:\r\n raise ValueError # must restart mdp first, as agent already took Terminate action on terminal\r\n\r\n if action == Action.TERMINAL:\r\n self.terminated = True\r\n return\r\n\r\n if uniform(0, 1) < self.failure:\r\n action = action.turn(uniform(0, 1) <= 0.5) # clockwise or counter-clockwise with equal chance for both\r\n print(\"FAIL:\", action)\r\n\r\n dx, dy = action.delta()\r\n x, y = self.agent_x+dx, self.agent_y+dy\r\n\r\n if x < 0 or x >= len(self.fields) or y < 0 or y >= len(self.fields[0]) or self.fields[x][y] == Field.OBSTACLE:\r\n return\r\n\r\n self.agent_x = x\r\n self.agent_y = y", "def _step(self, action):\n\n # action is generated from the action_policy (external to the environment)\n if len(action) == 4:\n object_index, new_location, action_means, action_stds = action\n if len(action) == 2:\n \"\"\"\n Action is not generated from a Gaussian distribution\n \"\"\"\n object_index, new_location = action\n action_means = action_stds = None\n \n position = new_location[:2]\n rotation = new_location[2]\n\n prev_transform = self.e.objects[object_index].transform\n\n if len(self.action_storage) > 0:\n last_progress = self.action_storage[-1][4]\n else:\n last_progress = 0\n\n info = {}\n if self.e.act(object_index, Command(position, rotation)):\n # print ('Action accepted')\n cur_transform = self.e.objects[object_index].transform\n # I need to call self.action_storage.append before get_observation_and_progress\n self.action_storage.append( [object_index, prev_transform, cur_transform, None, None, True, action_means, action_stds] )\n observation, progress = self.get_observation_and_progress()\n self.action_storage[-1][3:5] = [observation, progress]\n\n info['action_accepted'] = True\n else:\n \"\"\"\n Action failed\n We can reduce the progress to avoid falling out of the table\n \"\"\"\n if len(self.action_storage) > 0:\n # Just return observation and progress of last action\n _, _, _, observation, progress, _, _, _ = self.action_storage[-1]\n progress -= self.config.failed_action_penalty\n else:\n # First action failed\n observation, _ = self.get_observation_and_progress()\n progress = -self.config.failed_action_penalty\n \n self.action_storage.append( [object_index, prev_transform, prev_transform, observation, progress, False, action_means, action_stds] )\n\n \n info['action_accepted'] = False\n\n # Typical threshold approach\n if progress > self.progress_threshold:\n # Finish action\n done = True\n else:\n done = False\n \n reward = progress - last_progress\n #print ('Progress = %.2f ; reward = %.2f' % (progress, reward))\n\n return (observation, reward, done, info)", "def step(self, action):\r\n s = self.get_state()\r\n\r\n elements = np.arange(self.S)\r\n # weights = np.squeeze(self.nextStateProbability[s,action])\r\n weights = self.nextStateProbability[s, action]\r\n nexts = choices(elements, weights, k=1)[0]\r\n\r\n # p = self.nextStateProbability[s,action]\r\n # reward = self.rewardsTable[s,action, nexts][0]\r\n reward = self.rewardsTable[s, action, nexts]\r\n\r\n # fully observable MDP: observation is the actual state\r\n 
self.currentObservation = nexts\r\n\r\n gameOver = False\r\n if self.currentIteration > np.Inf:\r\n ob = self.reset()\r\n gameOver = True # game ends\r\n else:\r\n ob = self.get_state()\r\n\r\n history = {\"time\": self.currentIteration, \"state_t\": s, \"action_t\": action,\r\n \"reward_tp1\": reward, \"state_tp1\": nexts}\r\n # history version with actions and states, not their indices\r\n # history = {\"time\": self.currentIteration, \"action_t\": self.actionListGivenIndex[action],\r\n # \"reward_tp1\": reward, \"observation_tp1\": self.stateListGivenIndex[self.get_state()]}\r\n self.currentIteration += 1\r\n return ob, reward, gameOver, history", "def step(self, action: int):\n assert self.action_space.contains(action)\n loc = action\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n # update bord\n self.board[loc] = to_code(self.mark)\n\n # check if game has ended\n status = check_game_status(self.board)\n if status >= 0:\n self.done = True\n if status in [1, 2]:\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # update mark\n self.mark = next_mark(self.mark)\n\n return self._get_obs(), reward, self.done, None", "def step(self, reward, observation):\n self._last_observation = self._observation\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._store_transition(self._last_observation, self.action, reward, False)\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_quantile_samples,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn,\n self._tau,\n self.optimizer)\n self.action = onp.asarray(self.action)\n return self.action", "def execute_action(self, agent, action):\n agent.bump = False\n agent.performance_measure -= 1\n \n if action == 'TurnRight':\n agent.heading = self.turn_heading(agent.heading, -1)\n elif action == 'TurnLeft':\n agent.heading = self.turn_heading(agent.heading, +1)\n elif action == 'Forward':\n self.move_to(agent, vector_add(self.heading_to_vector(agent.heading),\n agent.location))\n elif action == 'Grab':\n if self.some_things_at(agent.location, tclass=Gold):\n try:\n gold = self.list_things_at(agent.location, tclass=Gold)[0]\n agent.has_gold = True\n self.delete_thing(gold)\n except:\n print \"Error: Gold should be here, but couldn't find it!\"\n print 'All things:', self.list_things_at(agent.location)\n print 'Gold?:', self.list_things_at(agent.location, tclass=Gold)\n sys.exit(-1)\n\n elif action == 'Release':\n if agent.location == self.entrance:\n if agent.has_gold:\n agent.performance_measure += 1000\n self.done = True\n elif action == 'Shoot':\n if agent.has_arrow:\n agent.has_arrow = False\n agent.performance_measure -= 10\n self.shoot_arrow(agent)\n elif action == 'Stop':\n self.done = True\n \n print '\\nCurrent Location: ', agent.location\n print 'Heading: ', self.heading_to_str(agent.heading)\n print 'Reminder- Start Location:', self.entrance\n print ''\n print 'Percepts:'", "def step(self, action):\n res = self.reward_table.get(self.curr_state, action)\n\n self.curr_state = res['result']\n\n return res", "def perform_action(self, action):\n \n assert self.is_valid_action(action)\n \n # Save the action.\n self.action = action\n \n #the slight strategy of the opponent\n if self.reward==rLose :\n observation = self.observation\n else:\n observation = 
random.choice([oRock,oPaper,oScissor])\n \n #determine the result of the game and get the reward\n if action == aRock:\n if observation == oRock:\n reward= rDraw\n elif observation == oPaper:\n reward= rLose\n elif observation == oScissor:\n reward= rWin\n elif action == aPaper:\n if observation == oRock:\n reward= rWin\n elif observation == oPaper:\n reward= rDraw\n elif observation == oScissor:\n reward= rLose\n elif action == aScissor:\n if observation == oRock:\n reward= rLose\n elif observation == oPaper:\n reward= rWin\n elif observation == oScissor:\n reward= rDraw\n \n \n #Store the observation and reward in the environment.\n self.observation = observation\n \n self.reward = reward\n \n \n return (observation, reward)\n # end def", "def step (self, action):\n if self.done == 1:\n print(\"episode done\")\n return [self.state, self.reward, self.done, self.info]\n\n else:\n degree = float(action[0] * HI_ANGLE)\n loc, last_pos = self.state\n\n theta = self.fire.deg_to_rad(degree)\n pos = round(self.fire.calc_dist(theta))\n delta = abs(loc - pos)\n\n self.state[1] = pos\n self.info[\"degree\"] = degree\n self.info[\"theta\"] = round(theta, 3)\n self.info[\"delta\"] = delta\n\n self.render()\n\n if pos <= self.fire.radius:\n # realistically, the launch crew should be dead\n self.reward = -100.0\n elif delta <= self.fire.radius:\n # target hit (within blast radius)\n self.reward = 100.0\n self.done = 1;\n else:\n # reward is the \"nearness\" of the blast destroying the target\n self.reward = round(100.0 * float(abs(loc - delta)) / float(self.fire.range))\n\n return [self.state, self.reward, self.done, self.info]", "def step(self, action):\n self.steps += 1\n self.robots[0].setAction(action)\n for i in range(self.num_agents):\n if i != 0 and self.policies[i:i+1]: # self.policies[0] is dummy\n self.robots[i].setAction(self.policies[i](self.robots[i].getObservation()))\n # rewards = [ -1.0 * self.num_foods / self.max_steps for _ in range(self.num_agents) ] # so agent needs to eat foods quickly\n rewards = [ 0.0 for _ in range(self.num_agents) ]\n for i in range(self.BULLET_STEPS):\n p.stepSimulation()\n rewards = [ rewards[i]+self._getReward(self.robots[i]) for i in range(self.num_agents) ]\n self.episode_rewards = [ self.episode_rewards[i]+rewards[i] for i in range(self.num_agents) ]\n obs = self.robots[0].getObservation()\n done = self._isDone()\n info = { 'steps': self.steps }\n if done:\n # TODO\n info['episode'] = { 'r': self.episode_rewards[0], 'l': self.steps, 'r_all': self.episode_rewards }\n # print(self.episode_rewards, self.steps)\n return obs, rewards[0], done, info", "def act(self, time, observations, reward):\n return 0", "def step(self, actions, agent_id=0):\n self._last_state = self._current_state\n\n # TODO\n # action = actions.discrete_actions[0]-1\n action = actions.argmax()\n\n done = 0\n if self._stage == 0: # is fixation\n if action == 0:\n reward = 0.\n else:\n reward = -1.\n self._current_state = 1\n self._stage = 1\n elif self._stage == 1: # is first stage, use prob_transition\n if action == 1 or action == 2:\n if np.random.random() < self._prob_transition[0][action-1]:\n self._current_state = 2\n else:\n self._current_state = 3\n reward = 0.\n else: # pick a next state at random\n reward = -1.\n self._current_state = np.random.random() < 0.5 and 2 or 3\n self._stage = 2\n else: # is second stage, use prob_reward\n # Given an action (arm pulled), sample reward, return\n if action == 1 or action == 2:\n current_prob_rewards = 
self._prob_reward[self._current_state-2]\n self._best_reward = self._max_reward*np.max(current_prob_rewards)\n thisProb = current_prob_rewards[action-1]\n if np.random.random() < thisProb:\n # print(\"give reward\")\n reward = self._max_reward\n else:\n reward = 0.0\n else:\n reward = -1.\n\n self._total_reward += reward\n self._best_total_reward += self._best_reward\n self._stage = 0\n self._current_state = 0\n self._trial += 1\n self._since_flipped += 1\n # if more than self._min_stable trials since flipping, certain chance of flipping prob rews\n if (self._since_flipped >= self._min_stable) and (np.random.random() <= self._flip_prob):\n self._randomize()\n self._since_flipped = 0\n\n\n self._last_action = np.zeros(self._num_arms)\n self._last_action[action] = 1\n # conditions to end episode\n if self._step >= self._steps_per_ep-1:\n self._state = READY_TO_END_EPISODE\n done = 1\n\n self._step += 1\n self._prev_reward = reward\n\n obs = self._current_state\n reset = done == 1. or self._step == MAX_FRAMES\n\n # print(np.array([[obs]]).shape)\n\n # print(reward, self._stage)\n return np.array([obs]), reward, done, reset", "def step(self, action):\n if self.platform is None:\n raise RuntimeError(\"Call `reset()` before starting to step.\")\n\n if not self.action_space.contains(action):\n raise ValueError(\n \"Given action is not contained in the action space.\")\n\n num_steps = self.frameskip\n\n # ensure episode length is not exceeded due to frameskip\n step_count_after = self.step_count + num_steps\n if step_count_after > self.episode_length:\n excess = step_count_after - self.episode_length\n num_steps = max(1, num_steps - excess)\n\n reward = 0.0\n for _ in range(num_steps):\n self.step_count += 1\n if self.step_count > self.episode_length:\n raise RuntimeError(\"Exceeded number of steps for one episode.\")\n\n # send action to robot\n robot_action = self._gym_action_to_robot_action(action)\n t = self.platform.append_desired_action(robot_action)\n\n # Use observations of step t + 1 to follow what would be expected\n # in a typical gym environment. 
Note that on the real robot, this\n # will not be possible\n observation = self._create_observation(t + 1)\n\n reward += self.compute_reward(observation, self.info)\n\n is_done = self.step_count == self.episode_length\n\n return observation, reward, is_done, self.info", "def step(self, action, decoder_hidden):\r\n\r\n # Check whether episode is over\r\n if (action == EOS_token) or (len(self.predicted_words)>= self.max_length):\r\n self.state = action, decoder_hidden\r\n RL_model_reward = model_evaluation.performance_metrics(\r\n target_sentence=self.target_sentence, pred_sentence=self.pred_sentence(), \r\n similarity_model=self.similarity_model, fluency_model=self.fluency_model, ESIM_model=self.ESIM_model,\r\n logr_model=self.logr_model, std_scaler=self.std_scaler,\r\n similarity_dist=self.similarity_dist, fluency_dist=self.fluency_dist, ESIM_dist=self.ESIM_dist,\r\n vocab_index=vocab_index, metric=self.reward_function)\r\n # Calculate relative reward\r\n self.ep_reward = np.around(RL_model_reward, 3)\r\n self.done = 1\r\n else:\r\n self.state = action, decoder_hidden\r\n \r\n # Add word to pred words\r\n self.predicted_words.append(vocab_index.index2word[action.item()])\r\n \r\n return self.state, self.ep_reward, self.done, None", "def step(self, action:list) -> (np.ndarray, float, bool, dict):\n\n\n action[0] = (action[0] + 1)/2 # Done to be compatible with RL algorithms that require symmetric action spaces\n if np.isnan(action).any(): action = np.zeros(action.shape)\n self.main_vessel.step(action)\n\n for vessel in self.moving_obstacles:\n if vessel.index != 0:\n obs = vessel.observe()\n reward = self.rewarder_dict[vessel.index].calculate()\n insight = self.rewarder_dict[vessel.index].insight()\n #print(f'Reward for vessel {vessel.index}: {reward} -- lambda: {insight}')\n obs = np.concatenate([insight,obs])\n action, _states = self.agent.predict(obs, deterministic=True)\n action[0] = (action[0] + 1)/2\n vessel.step(action)\n\n # Testing criteria for ending the episode\n done = self._isdone()\n self._save_latest_step()\n\n # Getting observation vector\n obs = self.observe()\n vessel_data = self.main_vessel.req_latest_data()\n self.collision = vessel_data['collision']\n self.reached_goal = vessel_data['reached_goal']\n self.progress = vessel_data['progress']\n\n # Receiving agent's reward\n reward = self.rewarder.calculate()\n self.last_reward = reward\n #self.cumulative_reward += reward\n\n info = {}\n info['collision'] = self.collision\n info['reached_goal'] = self.reached_goal\n info['progress'] = self.progress\n\n self.t_step += 1\n\n return (obs, reward, done, info)", "def step(self, action):\n action = self.randomization.action_randomizer.randomize(\n action, self._random_state\n )\n\n robot_exception = None\n try:\n self._act(action)\n except RobotException as re:\n logger.error(\n f\"Robot raised exception: {str(re)}. 
This will finish the current episode.\"\n )\n robot_exception = re\n\n if not self.constants.physical:\n # We don't need to do stepping for physical roll out.\n self.mujoco_simulation.step()\n\n self._synchronize_step_time()\n self.t += 1\n\n obs, reward, done, info = self.get_observation(robot_exception=robot_exception)\n obs, reward, done, info = self.step_finalize(obs, reward, done, info)\n return obs, reward, done, info", "def _step(self, state, action):\n dealer_hand, player_hand = state[0], state[1]\n\n if action == \"hit\":\n player_hand = self._play_card(player_hand)\n else:\n dealer_hand = self._dealer_step(dealer_hand)\n self._terminate = True\n\n reward = self._get_reward(dealer_hand, player_hand)\n\n if reward != 0:\n self._terminate = True\n\n return [dealer_hand, player_hand], reward", "def step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n self.microgridPolicy.improveAction(action);\n\n self.microgrid.update();\n\n self.updateState();\n done = self.microgridPolicy.verifyStopConditions();\n reward = self.microgridPolicy.computeReward(done)\n if done: \n if self.steps_beyond_done is None:\n self.steps_beyond_done = 0\n else:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n self.clock.increaseTimeStep();\n return self.state, reward, done, {}", "def step(self, action):\n self.timestep += 1\n self.actions = action.ravel()\n\n # Figure out which action was taken\n self.acted = False\n self.eat = False\n self.discard = False\n if action[0] > .5:\n self.eat = True\n self.acted = True\n elif action[1] > .5:\n self.discard = True\n self.acted = True\n\n # Check whether the appropriate action was taken, and assign reward.\n # There is a small punishment for doing nothing.\n self.reward = -.1\n if ((self.eat and self.edible) or\n (self.discard and not self.edible)):\n self.reward = 1.\n elif ((self.eat and not self.edible) or\n (self.discard and self.edible)):\n self.reward = -.9\n\n if self.acted:\n self.grab_fruit()\n\n return self.sensors, self.reward", "def run_one_step(self):\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n if self.env.check_terminal() is False:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n self.env.update(action)\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n self.agent.update_history(copy.copy(latest_experience))\n self.count += 1\n # If the latest history has a large enough batch, perform an update\n # CHECK IF THIS IS THE RIGHT METHOD\n if self.count % self.batch_size == 0:\n self.agent.update_policy_ordered(max(1, self.batch_size))\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.count = 0\n self.agent.policy.learner.update_target_network()\n else:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n 
latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n # Perform an update on all of the previous experiences that haven't been updated\n if self.count % self.batch_size > 0:\n self.agent.update_policy_ordered((self.count % self.batch_size) + 1)\n self.count = 0\n return reward", "def takeAction(self, action):\n return self.env.step(action)", "def step(self, agent_action):\n\n done = False\n self.round += 1\n # First check round num, if equal to max then fail\n if self.round == self.max_round:\n done = True\n success = FAIL\n user_response = self._end_response()\n else:\n try:\n success = self.update_state(agent_action)\n if success:\n user_response = self._end_response()\n else:\n agent_intent = agent_action['intent']\n assert agent_intent in self.user_responses, 'Not acceptable agent action'\n user_response = self.user_responses[agent_intent](agent_action)\n except Exception:\n return self._default_response(),-5,False,False\n\n reward = self.reward_function(agent_action, success)\n\n return user_response, reward, done, True if success is 1 else False", "def oneActionReward(self, state, action, redTurn):\n\n # TODO this is the main bottleneck for the neural network learning part\n # the only way to speed this up, would be to reduce the number of calls to networks\n # or optimize how the networks are called\n\n # save the original game\n oldGame = self.game\n self.game = state\n self.gameEnv.game = state\n oldCurrent = self.current\n\n # initialize reward for this action\n totalReward = 0\n\n # if the given action is None, then an action must be determined\n if action is None:\n # determine which enemy piece will move\n self.gameEnv.performAction(self.gameNetwork)\n\n # determine the direction that piece will move\n # if there is no valid network input, do not make an action\n netInput = self.toNetInput()\n # find the actual action\n if netInput is not None:\n action = self.internalNetwork.chooseAction(netInput, takeAction=self.canTakeAction)\n else:\n # This shouldn't ever be reached, because if self.toNetInput() returns None,\n # then self.current must be None, which only should happen after\n # self.gameEnv.performAction(self.gameNetwork) is called, but it should\n # only set that if the game is over, so if that first move ended the game\n # then this part of the code should never be reached\n # If this line is reached, then some error has happened\n action = None\n\n piecePos = self.current\n\n # if a move cannot be made, ensure win conditions are checked\n if action is None:\n self.game.checkWinConditions()\n # if an action can be taken, then the game is not over, so make a move\n else:\n # find the modifiers for the action\n modifiers = moveIntToBoolList(action)\n\n # add the reward for the piece moving\n moveR = moveReward(self.game, piecePos, modifiers, redTurn)\n if moveR is not None:\n totalReward += moveR\n # make the move\n self.game.play(piecePos, modifiers)\n\n # if the game ends, add reward for winning\n winReward = endGameReward(self.game.win, redTurn, self.game.moves)\n if winReward is not None:\n totalReward += winReward\n else:\n # if no move reward was found, then there was no valid reward, so set the reward to None\n totalReward = None\n\n # put the game back to it's original state\n self.game = oldGame\n self.gameEnv.game = oldGame\n self.current = oldCurrent\n\n # return the final reward\n return totalReward", "def act(self, observation, reward, done):\n if self._not_restarted(observation):\n # not the first action, remember it and 
update model\n self._remember(self.prev_action, reward, observation, done)\n if len(self.replay_memory) > self.batch_size:\n self._replay()\n\n # determine the next action if not yet done\n action = None\n\n if not done:\n # epsilon greedy\n if random.uniform(0, 1) < self.epsilon_policy.get():\n # exploration: random action\n action = self.action_space.sample()\n action['start_time'] += 1 # non-zero start times\n else:\n # exploitation\n action = self._get_best_action(observation)\n\n self.prev_observation = observation\n self.prev_action = action\n\n return action", "def step(self, action):\n (self.state, self.reward, self.terminal, self.truncated,\n self.info) = self.env.step(action)\n\n return self.state, self.reward, self.terminal, self.truncated, self.info", "def step(self):\n self.step_n += 1\n self.step_t += 1\n # TODO: directly calling agent.act will by-pass BaseDeepAgent, which\n # checks and assigns 'sess' arugment. So we manually set sess here. But\n # is there a better way to do this?\n self.action = self.agent.act(\n state=self.state, sess=self.agent.sess\n )\n next_state, vec_reward, done, _ = self.env.step(self.action)\n reward, done = func_compile_exp_agent(self.action, vec_reward, done)\n self.total_reward = reward + self.reward_decay * self.total_reward\n info = self.agent.step(\n state=self.state, action=self.action, reward=reward,\n next_state=next_state, episode_done=done\n )\n self.record(info)\n flag_success = True if done and reward > 0.0 else False\n if self.savedir is not None:\n self.steps_saver.save(self.episode_n, self.step_t, self.state, self.action,\n vec_reward, reward, done, self.total_reward, flag_success)\n self.state = next_state\n if done:\n self.step_t = 0\n return done", "def _reward(self, action):\n raise NotImplementedError", "def step(self, state, reward, training=True):\n last_state, last_action = self.last_state, self.last_action\n last_reward = reward\n state = state\n \n action = self.policy(state, training)\n\n if training:\n self.steps += 1\n print(\"## step:\",self.steps)\n\n if last_state is not None:\n experience = {\n \"state\": last_state,\n \"action\": last_action,\n \"reward\": last_reward,\n \"next_state\": state\n }\n\n self.memory.add(experience)\n #print(\"**memory size:\",self.memory.__len__())\n #else:\n #print(\"&& last_state\",last_state)\n\n if self.steps > self.replay_start_size: #para acumular cierta cantidad de experiences antes de comenzar el entrenamiento\n self.train_network()\n\n if self.steps % self.target_update_freq == 0: #el clon de la red se realiza cada cierta cant de steps\n self.update_target_network()\n\n self.last_state = state\n self.last_action = action\n\n return action", "def step(self, action):\n self._robot.send_command(action)\n\n obs = self.get_observation()\n\n reward = self.reward(obs.achieved_goal, self.goal)\n done = self.done(obs.achieved_goal, self.goal)\n next_observation = obs.observation\n return Step(observation=next_observation, reward=reward, done=done)", "def step(self, action: CARLAAction, *args: Any, **kwargs: Any) -> Transition:\n observation, reward, done, info = self.env.step(action)\n if observation[\"lane_invasion\"] > 0:\n logging.debug(\"A lane was invaded\")\n done = True\n reward = -1.0\n return observation, reward, done, info", "def perform_action(self, action):\r\n t_list = self.get_action_outcomes(self.current_state, action)\r\n new_state = t_list[np.argmax(np.random.multinomial(1, [t[0] for t in t_list]))][1]\r\n # print(len(self.trajectory), ':', self.current_state, '--', 
action ,'-->', new_state)\r\n self.current_state = new_state\r\n self.trajectory.append(new_state)\r\n return tuple(self.current_state) == tuple(self.end_state)", "def step(self, action):\n # Action indicates the position of a datapoint in self.indeces_unknown \n # that we want to sample in unknown_data\n # The index in train_data should be retrieved \n selection_absolute = self.indeces_unknown[action]\n # Label a datapoint: add its index to known samples and removes from unknown\n self.indeces_known = np.concatenate(([self.indeces_known, np.array([selection_absolute])]))\n self.indeces_unknown = np.delete(self.indeces_unknown, action) \n # Train a model with new labeled data\n known_data = self.dataset.train_data[self.indeces_known,:]\n known_labels = self.dataset.train_labels[self.indeces_known]\n known_labels = np.ravel(known_labels)\n self.model.fit(known_data, known_labels)\n # Get a new state \n classifier_state, next_action_state = self._get_state() \n # Update the number of available actions\n self.n_actions = np.size(self.indeces_unknown)\n # Compute the quality of the current classifier\n test_prediction = self.model.predict(self.dataset.test_data)\n new_score = self.quality_method(self.dataset.test_labels, test_prediction)\n self.episode_qualities.append(new_score)\n # Compute the reward\n reward = self._compute_reward()\n # Check if this episode terminated\n done = self._compute_is_terminal() \n return classifier_state, next_action_state, reward, done", "def update(self, state, action, nextState, reward):\n util.raiseNotDefined()", "def take_action(self, state):\n action = super(SarsaAgent, self).take_action(state)\n if self.learning:\n self.update_q_values(state, self.q_value((state, action)))\n self.prev_state = state\n self.prev_action = action\n self.prev_q_val = self.q_values[self.represent_state(self.prev_state), self.prev_action]\n self.log(\"size of q_values {0}\\nprev state {1}\\nprev action {2}\\nprev q-val {3}\"\n .format(len(self.q_values), self.prev_state, self.prev_action, self.prev_q_val))\n return action", "def step(self, action):\n # Implement your step method here\n # return (observation, reward, done, info)\n self._state = self._state + action\n # print('Step state:', self._state)\n x, y = self._state\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=done)", "def step(self, action):\n # get the instruction indicated by the action\n instr = self.instrs[action]\n # extend the program\n self.program.inst(instr)\n # run and get some measured bitstrings\n self.bitstrings, info = self._run_program(self.program)\n # compute the avg score of the bitstrings\n reward = self._prob_score(self.bitstrings)\n self.running_episode_reward += reward\n\n info[\"instr\"] = instr\n info[\"reward-nb\"] = reward\n self.current_step += 1\n\n # are we done yet?\n done = False\n if self.current_step >= MAX_PROGRAM_LENGTH:\n done = True\n if reward >= self.reward_threshold:\n reward += MAX_PROGRAM_LENGTH - self.current_step\n done = True\n\n return self.observation, reward, done, info", "def step(self, action):\n # print(action)\n distances = self.agent.return_distances(self.agent.corners, self.agent.line_pos)\n\n left = distances[0]\n right = distances[1]\n self.agent.distances.append({\n 'left': left,\n 'right': right\n })\n reward = 0\n if action == 1:\n self.agent.angle -= 90\n if self.agent.angle < 0:\n self.agent.angle = 0\n 
self.agent.direction_history.append('left')\n self.reset_raycasts(self.agent.angle)\n self.render()\n if left > right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 2:\n self.agent.angle += 90\n if self.agent.angle >= 360:\n self.agent.angle = 0\n\n self.reset_raycasts(self.agent.angle)\n self.render()\n self.agent.direction_history.append('right')\n if left < right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 0:\n self.agent.direction_history.append('forward')\n if self.agent.angle >= 360: self.agent.angle == 0\n if self.agent.angle == 0 or self.agent.angle == 360:\n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n \n if left + right >= 50:\n reward += 5\n\n self.render()\n\n elif action == 3:\n self.agent.direction_history.append('reverse')\n if self.agent.angle == 0:\n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n \n if left + right <= 50:\n reward += 5\n\n \n else:\n reward -= 5\n\n if \"forward\" not in self.agent.direction_history[len(self.agent.direction_history)-6:len(self.agent.direction_history)-1]:\n reward -= 10\n\n \n info = {}\n if self.agent.check_collision():\n reward -= 10\n self.reset() \n self.agent.rewards.append({\n 'leftDistance': left,\n 'rightDistance': right,\n 'reward': reward,\n })\n self.render()\n print(f\"REWARD: {reward}\")\n # self.render()\n # print(self.agent.direction_history[-1])\n self.agent.rewards.append(reward)\n return np.array([left, right]), reward, False, info", "def take_action(self, state):\n if self.epsilon_decay is not None:\n self.epsilon *= self.epsilon_decay\n if random.random() < self.epsilon:\n action = super(BaseQAgent, self).random_next_action(state)\n self.log('exploration move: {0}'.format(str(action)))\n else:\n action = self.greedy_next_action(state)\n self.log('exploitation move: {0}'.format(str(action)))\n return action", "def step(self, action):\n if self._backend_agent:\n self._backend_agent._on_gym_step_begin(self, action)\n\n result = self.env.step(action)\n (state, reward, done, info) = result\n self.steps_done_in_episode += 1\n self.steps_done_in_instance += 1\n self.total.steps_done_inc()\n if self.max_steps_per_episode and self.steps_done_in_episode >= self.max_steps_per_episode:\n done = True\n result = (state, reward, done, info)\n if not self.is_episode_done and done:\n self.is_episode_done = True\n self.episodes_done += 1\n self.total.episodes_done_inc()\n\n if self._backend_agent:\n self._backend_agent._on_gym_step_end(self, action, result)\n return result", "def step(self, action):\n possible_states, probs = zip(*self.get_next_states(self._current_state, action).items())\n next_state = weighted_choice(possible_states, p=probs)\n reward = self.get_reward(self._current_state, action, next_state)\n is_done = 
self.is_terminal(next_state)\n self._current_state = next_state\n return next_state, reward, is_done, {}", "def begin_episode(self, observation):\n self._reset_state()\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_quantile_samples,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn,\n self._tau,\n self.optimizer)\n self.action = onp.asarray(self.action)\n return self.action", "def _act_impl(self, observation, reward,\n done):\n if done:\n raise core.EpisodeDoneError(\"Called act on a done episode.\")\n\n if not self.observation_space.contains(observation):\n raise core.InvalidObservationError(\"Invalid ovservation: %s\" %\n observation)\n if self.params.observation_adjustment_fn:\n observation = self.params.observation_adjustment_fn(\n self.rng, self.beliefs, observation)\n\n features = self.feature_selection_fn(observation)\n self.beliefs = self._update_beliefs(features, self.beliefs)\n action = self._allocate(self._n_resource, self.beliefs)\n\n if not self.action_space.contains(action):\n raise gym.error.InvalidAction(\"Invalid action: %s\" % action)\n\n return action", "def step(self, action):\n obs, r, done, info = self.env.step(action)\n obs = self.get_observation(obs)\n return obs, r, self.is_done(), info", "def step(self, action):\n state, reward, done, debug_info = self.sample_transition(action)\n self.set_state(state)\n if \"next_state_heuristic\" in debug_info:\n self._current_heuristic = debug_info[\"next_state_heuristic\"]\n return state, reward, done, debug_info", "def step(self, action):\n \"\"\" Action is a motion command \"\"\"\n rich_obs, reward, done, info = super(ColoredEgoCostmapRandomAisleTurnEnv, self).step(action)\n obs = self._extract_egocentric_observation(rich_obs)\n return obs, reward, done, info", "def step(self, action: nx.Graph):\n # Get the SMILES string associated with this action\n self._state = action\n if self.record_path:\n self._path.append(self._state)\n\n # Update the action space\n self.action_space.update_actions(self._state, self.observation_space)\n self._counter += 1\n\n # Check if we have finished\n # Out of steps or no more moves\n done = len(self.action_space.get_possible_actions()) == 0\n\n # Compute the fingerprints for the state\n return self._state, self.reward(), done, {}", "def _step(self, action):\n \n obs, reward, done, info = self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info", "def step(self,\n action):\n assert isinstance(action, Actions)\n reward = 0.0\n done = False\n close = self.current_close()\n # if buy a share the state change and you pay the commission (no price slippage)\n if action == Actions.Buy and self.have_position:\n self.have_position = True\n self.open_price = close\n reward -= self.commission_perc\n # if close pay the commission if done = reset_on_close change the done flag(end) and give the reward\n elif action == Actions.Close and self.have_position:\n reward -= self.commission_perc\n done |= self.reset_on_close\n if self.reward_on_close:\n reward += 100.0 * (close - self.open_price) / self.open_price\n self.have_position = False\n self.open_price = 0.0\n # end the process: reward after the last bar movement\n 
self._offset += 1\n prevision_close = close\n close = self.current_close()\n done |= self._offset >= self._prices.close.shape[0] - 1\n if self.have_position and not self.reward_on_close:\n reward += 100.0 * (close - prevision_close) / prevision_close\n return reward, done", "def learn(self, action, reward, is_terminal):\n\n\t\t# Add the experience to the replay memory\n\t\tself.replay_memory.record(self.state_history[:,:,3], action, reward, is_terminal)", "def step(self, action: Action) -> Feedback: # type: ignore\n self._action_counter += 1\n step_id = self._action_counter\n\n self._encode_and_send_action(action, step_id)\n\n # Wait (blocking!) for the response envelope from the environment\n in_envelope = self._queue.get(block=True, timeout=None) # type: Envelope\n\n msg = self._decode_percept(in_envelope, step_id)\n\n observation, reward, done, info = self._message_to_percept(msg)\n\n return observation, reward, done, info", "def step(self, state, action, reward, next_state, done):\n self.episode_step += 1\n # if early episode termination\n if self.params.max_episode_steps and self.episode_step >= self.params.max_episode_steps:\n done = True\n # track progress\n self.tracker.step(reward)\n # memorize\n self.memory.append((state, action, reward, next_state, 1.0 - done))\n return done", "def execute_action(self, agent, action):\n abstract", "def take_action(self, state):", "def step(self, action: Union[np.ndarray, torch.Tensor]):\n if type(action) == torch.Tensor:\n action = action.squeeze().numpy()\n\n if not type(action) is np.ndarray:\n raise Exception(\"The action must be a Numpy array but is of type %s (value = %s)\" % (type(action), action))\n\n if self.increment_actions and not self.action_space.contains(action):\n action = action.clip(self.action_space.low, self.action_space.high)\n\n # Additionally, we must make sure the value will stay in the range\n # min <= x + action <= max\n if self.increment_actions:\n current_values = self.x[np.array([0, 1, 3, 5])]\n new_flow_values = current_values + action\n else:\n new_flow_values = action\n\n new_flow_values = np.clip(new_flow_values, self.flows_lower_bounds, self.flows_upper_bounds)\n self.update_all_flows(new_flow_values)\n\n if any([x < 0 for x in self.x]):\n pass\n # TODO: should I clip the actions to ensure the flows are always positive?\n # raise Exception(f\"Negative flows! 
x = {[round(x, 4) for x in self.x]}\")\n\n self.update_fitness()\n\n self.step_number += 1\n\n # reward = self.fitness - self.previous_fitness\n reward = self.fitness\n observation = self.get_observation()\n\n done = (self.step_number == self.total_number_of_episodes)\n info = {}\n return observation, reward, done, info", "def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]:\n next_state, reward, done, info = self.env.step(action)\n return next_state, reward, done, info", "def step(self, action: CARLAAction, *args: Any, **kwargs: Any) -> Transition:\n observation, reward, done, info = self.env.step(action)\n if observation[\"collision\"] > 0:\n logging.debug(\"A collision occured\")\n done = True\n reward = -1.0\n return observation, reward, done, info", "def step(self, action):\n self.move_step(action) # Move.\n r, d = self.check_goal() # Check the reward and done state, and create\n # new environment.\n s_new= self.render_env() # Render the new environment.\n return s_new, r, d", "def observe_reward(self,state,action,statePrime,reward):\n pass", "def begin_episode(self, observation):\n self._reset_state()\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn)\n self.action = onp.asarray(self.action)\n return self.action", "def step(self, action):\n observation, reward, done, _ = self.env.step(action)\n return np.array(observation), reward, done", "def env_step(self, action):\n random_prob = np.random.uniform(0, 1)\n if random_prob <= self.stochasticity: # Ignore agent's action and move to one of the 8 neighbours\n # Determine how the agent moves (from -1 to 1 in each direction, but not both 0)\n random_nn = np.random.randint(0, len(self.nn))\n random_y = self.nn[random_nn, 0]\n random_x = self.nn[random_nn, 1]\n\n # Move to one of the nearest neighbours\n self.current_state[0] += random_y\n self.current_state[1] += random_x\n else: # Perform agent's action\n # Update current stated based on the action the agent took\n curr_x = self.current_state[1]\n self.current_state[0] += self.actions[action][0] + self.wind[curr_x]\n self.current_state[1] += self.actions[action][1]\n\n # Check if the agent fell out of the boundaries of the grid world\n y_coord = self.current_state[0]\n x_coord = self.current_state[1]\n\n if y_coord >= self.num_rows: # Agent went too far up\n self.current_state[0] = self.num_rows - 1\n elif y_coord < 0: # Agent went too far down\n self.current_state[0] = 0\n\n if x_coord >= self.num_cols: # Agent went too far right\n self.current_state[1] = self.num_cols - 1\n elif x_coord < 0: # Agent went too far left\n self.current_state[1] = 0\n\n is_terminal = False\n reward = -1.0\n\n # Check if the agent reached a terminal state\n if self.current_state == self.terminal_state:\n is_terminal = True\n reward = 0.0\n\n return reward, self.current_state, is_terminal", "def step(self, state, action, reward, done):\n\n self.memory.add(state, action, reward, done)\n if done and self.n_tau % self.update_freq == 0:\n self.n_tau += 1\n return self.update()\n return None", "def step(self, action):\n\n previous_state = self.state\n self._make_action(action) \n self.step_simulation()\n self._make_observation() # Update state\n \n 
###################\n ### Reward function\n\n body_position = self.agent.get_position('Torso') # x,y,z coordinates of the agent\n r_foot_collision, l_foot_collision = self.state[-2:] # Feet collision indicators [0/1]\n roll, pitch = self.state[12:14] # Roll and pitch of the agent's convex hull\n\n # Staying upright\n posture = 0\n if abs(roll) > abs(previous_state[12]):\n posture -= .1\n else:\n posture += .125\n\n if abs(pitch) > abs(previous_state[13]):\n posture -= .1\n else:\n posture += .125\n \n hull = 0\n if abs(roll) < .125 and abs(pitch) < .125:\n posture += .1\n # Lifting feet while upright\n # collisions = np.count_nonzero(self.state[14::])\n # posture = (2 - collisions) * .\n\n # Hull location\n progress = body_position[0] - self.previous_body_position\n if progress > 0: \n hull = 0.1 + progress * 40\n if hull > .5: hull = .5\n else:\n hull = -0.1 + progress * 40\n if hull < -.5: hull = -.5\n self.previous_body_position = body_position[0]\n\n \"\"\"\n STATE SPACE:\n include:\n 1. Angular velocity of the torso (also normal velocity?) both can be obtained through gyro and accelerometer\n 2. Change to orientation of the torso instead of convex hull\n 3. \n \"\"\"\n\n # Feet distance\n # Use multiplicative reward?\n # Change in feet position along the X axis\n # pos_lfoot = self.agent.get_position('LFoot')[0]\n # pos_rfoot = self.agent.get_position('RFoot')[0]\n # distance_lfoot = (pos_lfoot - self.previous_feet_position[0])\n # distance_rfoot = (pos_rfoot - self.previous_feet_position[1])\n # if self.previous_feet_position[0] != 0:\n # feet_distance = (distance_lfoot + distance_rfoot) * 100\n # else:\n # feet_distance = 0\n\n # self.previous_feet_position = [pos_lfoot, pos_rfoot]\n\n base = 0.05\n reward = base + posture + hull\n # print('hull: {}'.format(hull))\n # print('posture: {}'.cformat(posture))\n\n # End condition\n if (abs(roll) > self.fall_threshold or abs(pitch) > self.fall_threshold):\n reward -= 2\n self.done = True \n\n # print('Posture: {} \\n Hull: {}'.format(posture, hull))\n # print('Total reward: {}'.format(reward))\n\n return self.state, reward, self.done, {}", "def step(self, state, action, reward, next_state, done):\n # print(\"agent.step() - state: {}, action: {}, reward: {}, next_state: {}, done: {}\".format(state, action, reward, next_state, done))\n current_reward_val = self.Q[state][action]\n next_step_greedy_action = np.argmax(self.Q[next_state])\n next_step_reward_val = self.Q[next_state][next_step_greedy_action]\n gamma = 0.75 # toto - paramterize\n self.Q[state][action] = self.Q[state][action] + self.alpha * (\n reward + gamma * next_step_reward_val - self.Q[state][action])", "def take_one_step(env, policy, state):\n action = sample_action(policy, state)\n new_state, reward, done, _ = env.step(action)\n if (done == True and reward > 0):\n reward = 10.0\n if new_state in hole_states:\n reward = -10.0\n elif not done:\n reward = -1.0\n # print(\"state {0}, new_state {1}, action {2}, reward {3}, done {4}\".format(state, new_state, action, reward, done))\n return action, reward, new_state, done", "def step(self, action): # action is nb-cops-sized or 1-sized\n reward = 0\n done = False\n\n action = np.array(action)\n\n def old_pos(set=None):\n if set is None:\n return self.cops_pos if self.is_cops_turn else self.rob_pos\n else:\n if self.is_cops_turn:\n self.cops_pos = action\n else:\n self.rob_pos = action\n\n invalids = []\n\n if self.is_first_turn:\n self.graph.set_cr(action, self.is_cops_turn)\n else:\n edges = self.graph.get_rep()[old_pos(), 
action]\n invalids = edges != 1\n invalids[action == old_pos()] = False\n invalids = np.where(invalids == True)[0]\n if invalids.shape[0] != 0:\n action[invalids] = old_pos()[invalids] # correct action\n self.graph.set_cr(action, self.is_cops_turn)\n\n old_pos(action)\n if not self.is_cops_turn and self.is_first_turn:\n self.is_first_turn = False\n self.is_cops_turn = not self.is_cops_turn\n if self.rob_pos is not None and self.rob_pos[0] in self.cops_pos:\n print(\"Cops won\")\n done = True\n reward += (1 if self.is_cops_turn else -1) * REWARD_END_WL\n\n reward += (-1 if self.is_cops_turn else +1) * REWARD_STEP_WL\n reward -= len(invalids) * REWARD_INVALID\n\n observation = self.graph.get_attr()\n\n if self.is_cops_turn:\n self.cops_rew += reward\n else:\n self.rob_rew += reward\n\n if not done:\n if self.is_cops_turn and self.cops is not None:\n observation, _, done, _ = self.step(self.cops.act(observation))\n elif not self.is_cops_turn and self.robber is not None:\n observation, _, done, _ = self.step(self.robber.act(observation))\n return observation, reward, done, {}", "def rollout(agent, env):\n # run until episode ends\n episode_reward = 0\n done = False\n obs = env.reset()\n while not done:\n action = agent.compute_action(obs)\n obs, reward, done, info = env.step(action)\n episode_reward += reward\n \n return episode_reward", "def env_step(self, action):\n if action == 0: # Hit\n\n new_state = deepcopy(self.current_state)\n reward = 0\n terminal = False\n \n new_card = min(self.random.randint(1,14), 10)\n # print('new card:', new_card)\n \n if new_card == 1:\n self.player_ace_count += 1\n new_state['player_sum'] = self.current_state['player_sum'] + 11 \n else:\n new_state['player_sum'] = self.current_state['player_sum'] + new_card\n\n while new_state['player_sum'] > 21 and self.player_ace_count > 0:\n self.player_ace_count -= 1\n new_state['player_sum'] -= 10\n\n new_state['usable_ace'] = int(self.player_ace_count > 0)\n\n if new_state['player_sum'] > 21: # Goes bust\n reward = -1\n terminal = True\n\n elif action == 1: # Stick\n\n new_state = deepcopy(self.current_state)\n terminal = True\n\n if self.current_state['dealer_card'] == 1:\n dealer_ace = 1\n dealer_sum = 11\n else:\n dealer_ace = 0\n dealer_sum = self.current_state['dealer_card']\n\n first_two_cards = True\n while dealer_sum < self.dealer_sticks or first_two_cards:\n first_two_cards = False\n # new_card = self.random.choice(range(1,11), p=self.card_probs)\n new_card = min(self.random.randint(1,14), 10)\n if new_card == 1:\n dealer_sum += 11\n dealer_ace += 1\n else:\n dealer_sum += new_card\n\n while dealer_sum > 21 and dealer_ace > 0:\n dealer_sum -= 10\n dealer_ace -= 1\n dealer_ace = int(dealer_ace > 0)\n # print('dealer:', new_card)\n\n # print('dealer sum:', dealer_sum)\n if dealer_sum > 21:\n reward = 1\n else:\n if new_state['player_sum'] > dealer_sum:\n reward = 1\n elif new_state['player_sum'] < dealer_sum:\n reward = -1\n else:\n reward = 0\n # reward = int(new_state['player_sum'] > dealer_sum) - int(new_state['player_sum'] < dealer_sum)\n\n else:\n raise Exception(\"Invalid action.\")\n\n self.current_state = new_state\n\n self.reward_obs_term = (reward, self.observation(self.current_state), terminal)\n\n return self.reward_obs_term", "def reward(self, observation, action, reward):\r\n\r\n if reward > 0 :\r\n print(\"win\") \r\n self.done=1 \r\n self.done_MC=1\r\n\r\n else:\r\n self.done=0\r\n\r\n self.current_state=observation\r\n self.current_action=action\r\n self.current_reward=reward\r\n 
self.reward_list.append(reward.item())\r\n \r\n self.log_probs.append(self.log_prob)\r\n self.values.append(self.value)\r\n self.rewards.append(self.torch.tensor([self.current_reward], dtype=self.torch.float, device=self.device))\r\n self.masks.append(self.torch.tensor([1-self.done], dtype=self.torch.float, device=self.device))", "def state_update(self, last_action, action):\n s_t = self._get_state(last_action)\n r= self._get_reward(last_action,action)\n self._move_user()\n s_t1 = self._get_state(action)\n self.reward = r\n self.s_t = s_t\n self.s_t1 = s_t1", "def make_step(self, action_index):\n # Randomly sample action_index if world is stochastic\n if np.random.uniform(0, 1) < self.random_move_probability:\n action_indices = np.arange(self.num_actions, dtype=int)\n action_indices = np.delete(action_indices, action_index)\n action_index = np.random.choice(action_indices, 1)[0]\n\n action = self.actions[action_index]\n\n # Determine new position and check whether the agent hits a wall.\n old_position = self.agent_position\n new_position = self.agent_position\n if action == \"UP\":\n candidate_position = old_position + self.num_cols\n if candidate_position < self.num_fields:\n new_position = candidate_position\n elif action == \"RIGHT\":\n candidate_position = old_position + 1\n if candidate_position % self.num_cols > 0: # The %-operator denotes \"modulo\"-division.\n new_position = candidate_position\n elif action == \"DOWN\":\n candidate_position = old_position - self.num_cols\n if candidate_position >= 0:\n new_position = candidate_position\n elif action == \"LEFT\": # \"LEFT\"\n candidate_position = old_position - 1\n if candidate_position % self.num_cols < self.num_cols - 1:\n new_position = candidate_position\n else:\n raise ValueError('Action was mis-specified!')\n\n # Update the environment state\n self.agent_position = new_position\n \n # Calculate reward\n reward = self.rewards[self.agent_position]\n reward -= 1\n return reward, new_position", "def step(self, action, state):\r\n done = False\r\n next_state = json.loads(state)\r\n state_shift = self.action_map[action]\r\n\r\n next_state = np.array(next_state) + np.array(state_shift)\r\n next_state = next_state.tolist()\r\n\r\n reward = self.reward[next_state[0], next_state[1]]\r\n self.reward[next_state[0], next_state[1]] = 0\r\n\r\n next_state = json.dumps(next_state)\r\n\r\n if reward < 0 or reward == 4:\r\n done = True\r\n\r\n return next_state, reward, done", "def doAction(self, gameState, action):\n self.lastState = gameState\n self.lastAction = action", "def step(self, state, action):\n reward = 0.0\n new_card = self._generate_random_card()\n if action == ACTION_HIT:\n new_player_value = state.player_sum + new_card.get_game_value()\n if new_player_value < 1 or new_player_value > 21:\n return TERMINAL_STATE, -1\n return State(state.dealer_card, new_player_value), 0.0\n current_dealer_value = state.dealer_card.get_game_value()\n # STICK\n current_dealer_value += new_card.get_game_value()\n while current_dealer_value < 17:\n if current_dealer_value < 1:\n return TERMINAL_STATE, 1\n drawn_card = self._generate_random_card()\n current_dealer_value += drawn_card.get_game_value()\n if current_dealer_value > 21:\n return TERMINAL_STATE, 1\n if state.player_sum > current_dealer_value:\n return TERMINAL_STATE, 1\n if state.player_sum == current_dealer_value:\n return TERMINAL_STATE, 0\n return TERMINAL_STATE, -1", "def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:\n next_state, reward, done, _ = 
self.env.step(action, resize=RESIZE, size = RESIZE_SIZE)\n\n if not self.is_test:\n self.transition += [reward, next_state, done]\n \n # N-step transition\n if self.use_n_step:\n one_step_transition = self.memory_n.store(*self.transition)\n # 1-step transition\n else:\n one_step_transition = self.transition\n\n # add a single step transition\n if one_step_transition:\n self.memory.store(*one_step_transition)\n \n return next_state, reward, done" ]
[ "0.77299076", "0.7505443", "0.7489188", "0.7451101", "0.7446967", "0.7423517", "0.7353827", "0.73296374", "0.732902", "0.7246652", "0.71973366", "0.71956855", "0.7170794", "0.7170417", "0.7162732", "0.71451074", "0.7140692", "0.71406287", "0.71278226", "0.71219754", "0.71109253", "0.7090877", "0.70869845", "0.7085137", "0.7063677", "0.7054138", "0.70449317", "0.70366824", "0.7033589", "0.7025373", "0.7006869", "0.7001018", "0.6990449", "0.69802225", "0.69578475", "0.6956955", "0.6933207", "0.69265133", "0.6925393", "0.6921236", "0.69206285", "0.6916522", "0.6915627", "0.69097584", "0.6894213", "0.68863004", "0.6885706", "0.687405", "0.6873803", "0.6870774", "0.6870652", "0.6862348", "0.68557715", "0.68502146", "0.684196", "0.683575", "0.6832626", "0.68091553", "0.6806329", "0.68055475", "0.67869866", "0.6778148", "0.67703205", "0.6751887", "0.6743675", "0.6734342", "0.6728205", "0.6722045", "0.67154217", "0.67139834", "0.670993", "0.6702935", "0.6696547", "0.6694851", "0.66893286", "0.6681997", "0.66788584", "0.66726816", "0.6658906", "0.66587865", "0.6657755", "0.6654369", "0.665341", "0.66513664", "0.66490126", "0.6646968", "0.66443723", "0.6643454", "0.6641641", "0.6638604", "0.6631035", "0.6608841", "0.6605315", "0.6604984", "0.66026354", "0.6594905", "0.6593819", "0.6593525", "0.65921265", "0.6588481", "0.6587842" ]
0.0
-1
Reset the state of the environment for a new episode. `setup` is used to let the reset function know when we're calling it from `setup`. If we don't, the 'random' init scheme should reset to the randomly chosen position instead of picking a new random one.
def reset(self, setup=False):
    self._done = False
    self._nbSteps = 0

    x = None
    if (self.startPosX == 'random' and setup) or (
            self.startPosX == 'episodeRandom'):
        x = random.randint(0, self._width - 1)
    elif (self.startPosX == 'random' and not setup):
        x = self._initState[0]
    elif self.startPosX == 'center':
        x = self._width - 1
    else:
        x = int(self.startPosX)

    y = None
    if (self.startPosY == 'random' and setup) or (
            self.startPosY == 'episodeRandom'):
        y = random.randint(0, self._height - 1)
    elif (self.startPosY == 'random' and not setup):
        y = self._initState[1]
    elif self.startPosY == 'center':
        y = self._height - 1
    else:
        y = int(self.startPosY)

    self._currentPos = (x, y)
    self._trajectory = [(x, y)]
    return (x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n self.target = np.random.uniform(-10.0,10.0,size=(2))\n\n # initialize sheep positions\n if self.fixed_reset:\n init_sheep_pose = np.array([75.0, 75.0])\n self.sheep_poses = (np.random.uniform(-50.0, 50.0, \n size=(self.num_sheep,2))) + init_sheep_pose[None,:]\n else:\n init_sheep_pose = np.random.uniform(-self.init_sheep_root, \n self.init_sheep_root, size=(2))\n self.sheep_poses = (np.random.uniform(-self.init_sheep_range, \n self.init_sheep_range, size=(self.num_sheep,2))) \\\n + init_sheep_pose[None,:]\n self.sheep_com = self.sheep_poses.mean(axis=0)\n\n # get the farthest sheep and radius of the sheep\n dist_to_com = np.linalg.norm((self.sheep_poses - self.sheep_com[None,:]), axis=1)\n self.farthest_sheep = self.sheep_poses[np.argmax(dist_to_com),:]\n self.radius_sheep = np.array([np.max(dist_to_com)])\n\n # update distance to target\n self.target_distance = np.linalg.norm(self.target - self.sheep_com)\n\n # initialize values for reward estimation\n self.init_radius_sheep = self.radius_sheep\n self.init_target_distance = self.target_distance\n\n # initialize dog position\n if self.fixed_reset:\n init_dog_pose = np.array([0.0,75.0])\n else:\n init_theta = np.random.uniform(-np.pi,np.pi)\n init_dog_pose = init_sheep_pose + self.init_dog_distance*np.array([np.cos(init_theta), \n np.sin(init_theta)])\n self.dog_pose = init_dog_pose\n\n # initialize inertia\n self.inertia = np.ones((self.num_sheep, 2))\n\n # initialize episode reward and length\n self.episode_reward = 0\n self.episode_length = 0\n\n # get the state, reward, finish, info\n state = self._get_state()\n \n return state", "def _hard_reset(self):\n self._reset_specific_envs(np.ones_like(self.episodes_done))\n self._update_other_info()", "def _reset(self):\n np.random.shuffle(self.id)\n self.episode_step = 0 # Reset episode step counter at the end of every episode\n self._state = self.X_train[self.id[self.episode_step]]\n self._episode_ended = False\n\n return ts.restart(self._state)", "def reset(self):\n self.tracker.reset()\n self.episode += 1\n self.episode_step = 0", "def reset(self, **kwargs):\n if self._backend_agent:\n self._backend_agent._on_gym_reset_begin(self, **kwargs)\n\n result = self.env.reset(**kwargs)\n if self.steps_done_in_episode > 0 and not self.is_episode_done:\n self.episodes_done += 1\n self.total.episodes_done_inc()\n self.is_episode_done = False\n self.steps_done_in_episode = 0\n\n if self._backend_agent:\n self._backend_agent._on_gym_reset_end(self, result, **kwargs)\n return result", "def reset(self):\n \n if self._config.fix_seed:\n self._init_seed = (self._init_seed + 1) % 2**32 # set_seed requires int\n self.game.set_seed(self._init_seed)\n\n super(ShootEnv, self).reset()\n\n self._killcount = 0.0\n self._ammo = self.game.get_game_variable(GameVariable.AMMO2)\n self._health = self.game.get_game_variable(GameVariable.HEALTH)\n\n return self._get_observation()", "def reset(self):\n\n self.curr_episode += 1\n self.curr_step = 0\n\n self.action_episode_memory.append([])\n self.rewards.append([])\n\n self.is_finalized = False\n init_state, init_reward = self._take_action(5 * np.random.randn(self.act_dimension))\n self.initial_conditions.append(init_state)\n return init_state", "def specific_reset(self) -> None:\n self.agent.specific_reset() # reset joints\n new_pos = self.agent.init_xyz\n new_pos[:2] = np.random.uniform(-0.01, 0.01, 2)\n 
self.agent.set_position(new_pos)\n self.old_potential = self.calculate_task_potential()", "def reset(self):\n while not self._check_episode_start_condition():\n self._simulate()\n self.state, _ = self._extract_state()\n return self.state", "def reset(\n self,\n *,\n seed: int | None = None,\n options: dict[str, Any] | None = None,\n ) -> tuple[np.ndarray, AtariEnvStepMetadata]:\n super().reset(seed=seed, options=options)\n del options\n # Gymnasium's new seeding API seeds on reset.\n # This will cause the console to be recreated\n # and loose all previous state, e.g., statistics, etc.\n seeded_with = None\n if seed is not None:\n seeded_with = self.seed(seed)\n\n self.ale.reset_game()\n obs = self._get_obs()\n\n info = self._get_info()\n if seeded_with is not None:\n info[\"seeds\"] = seeded_with\n return obs, info", "def _soft_reset(self):\n self._reset_specific_envs(self.episodes_done)\n self._update_other_info()", "def _reset_seeds(self) -> None:\n self._seeds = [None for _ in range(self.num_envs)]", "def reset_from_state(self, state):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n self.target = state[4:6]\n\n # initialize sheep com\n self.sheep_com = state[0:2]\n\n # get the farthest sheep and radius of the sheep\n self.farthest_sheep = state[2:4]\n self.radius_sheep = np.array([state[8]])\n\n # update distance to target\n self.target_distance = np.array([state[9]])\n\n # initialize sheep position\n self.sheep_poses = (np.random.uniform(-0.75*self.radius_sheep, \n 0.75*self.radius_sheep, size=(self.num_sheep,2))) \\\n + self.sheep_com[None,:]\n rnd_ind = np.random.choice(self.num_sheep)\n self.sheep_poses[rnd_ind,:] = state[2:4]\n\n # initialize values for reward estimation\n self.init_radius_sheep = self.radius_sheep\n self.init_target_distance = self.target_distance\n\n # initialize dog position\n init_dog_pose = state[6:8]\n self.dog_pose = init_dog_pose\n\n # initialize inertia\n self.inertia = np.ones((self.num_sheep, 2))\n\n # initialize episode reward and length\n self.episode_reward = 0\n self.episode_length = 0\n\n # get the state, reward, finish, info\n state = self._get_state()\n \n return state", "def _reset(self):\r\n \r\n airgym.reset()\r\n self.stepN = 0\r\n self.episodeN += 1\r\n \r\n self.allLogs = { 'reward': [0] }\r\n self.allLogs['distance'] = [221]\r\n self.allLogs['action'] = [1]\r\n \r\n print(\"\")\r\n \r\n #self.sensors = airgym.getSensorStates()\r\n \r\n # Initial state\r\n self.state = airgym.getScreenDepthVis()\r\n \r\n \r\n return self.state", "def reset(self):\r\n \r\n self.done = False\r\n self.t = 0\r\n self.episode = random.choice(episodes)\r\n\r\n # initiate agent\r\n self.agent = self.create_agent(Agent)\r\n \r\n # initiate state at time zero\r\n self.state = (self.episode[self.t]['ST Relative Indicator'], \r\n self.episode[self.t]['ST Relative Indicator'], \r\n self.agent.stock,\r\n self.t)\r\n \r\n return self.state", "def _reset(self): # We are using a virtual function defined in the gym infrastructure.\n self.gazebo.unpauseSim()\n \"\"\"\n why we need to unpauseSim because resetting controllers and for checking the sensors, we need the simulation\n to be running because otherwise we don't have any sensory data and we don't have access to the controller reset\n functions services they won't work and tell you to hit play. 
=> it is very important.\n \"\"\"\n self.controllers_object.reset_controllers()\n self.check_all_sensors_ready()\n self.set_init_pose()\n #initialized robot\n self.gazebo.pauseSim()\n self.gazebo.resetSim()\n self.gazebo.unpauseSim()\n self.controllers_object.reset_controllers()\n self.check_all_sensors_ready()\n self.gazebo.pauseSim()\n self.init_env_variables()\n obs = self._get_obs()\n simplified_obs = self.convert_obs_to_state(obs)\n\n return simplified_obs", "def resetEyes(self):\n\n\t\tself.leds.on(\"FaceLeds\")", "def resetEyes(self):\n\n\t\tself.leds.on(\"FaceLeds\")", "def reset(self):\n self.curr_episode += 1\n self.action_episode_memory.append([])\n self.is_game_done = False\n self.price = 1.00\n self.sendCmd(self.url,\"reset\")\n return self._get_state()", "def env_init(self, env_info={}):\n self.dealer_sticks = env_info['dealer_sticks']\n self.random = np.random.RandomState(env_info['seed'])\n self.current_state = None", "def _reset_(self):\n print(\"Resetting\")\n\n self._q_target_, x_target = self._pick_random_angles_()\n np.copyto(self._x_target_, x_target)\n if self._target_type == 'position':\n self._target_ = self._x_target_[self._end_effector_indices]\n elif self._target_type == 'angle':\n self._target_ = self._q_target_\n self._action_ = self._rand_obj_.uniform(self._action_low, self._action_high)\n self._cmd_prev_ = np.zeros(len(self._action_low)) # to be used with derivative control of velocity\n if self._reset_type != 'none':\n if self._reset_type == 'random':\n reset_angles, _ = self._pick_random_angles_()\n elif self._reset_type == 'zero':\n reset_angles = self._q_ref[self._joint_indices]\n self._reset_arm(reset_angles)\n\n rand_state_array_type, rand_state_array_size, rand_state_array = utils.get_random_state_array(\n self._rand_obj_.get_state()\n )\n np.copyto(self._shared_rstate_array_, np.frombuffer(rand_state_array, dtype=rand_state_array_type))\n\n print(\"Reset done\")", "def reset(self):\n self.tracker.reset()\n self.total_max_q = 0.0\n self.episode_step = 0\n self.episode += 1", "def reset(self, gui=False, test_ind=-1):\n # self.gui = gui\n # if gui:\n # # save episode to disk\n # if self._global_frames:\n # make_video_from_rgb_imgs(self._global_frames, self.output_path, f\"episode_global_{self.cur_episode}\")\n\n # for agent_id, frames in self._agent_frames.items():\n # if frames:\n # make_video_from_rgb_imgs(frames, self.output_path, f\"episode_{self.cur_episode}_{agent_id}\")\n\n # # clear frames of previous episode\n # self._global_frames = []\n # self._agent_frames = {agent_id: [] for agent_id in self.agent_tags}\n\n if (self.train_mode):\n seed = self.seed\n elif (test_ind < 0):\n seed = self.seed-1\n else:\n seed = self.test_seeds[test_ind]\n np.random.seed(seed)\n self.seed += 1\n\n self.cur_episode += 1\n self.t = 0 # step counter for each episode\n self.rewards = [0] # to keep track of global rewards\n obs = self.env.reset(done_only=False).cpu().numpy()\n\n # if self.gui:\n # self._global_frames.append(self.env.map_to_colors().astype(np.uint8))\n\n # for agent_id, agent_obs in obs.items():\n # self._agent_frames[agent_id].append(agent_obs.astype(np.uint8))\n\n # obs = list(obs.values())\n obs = self._get_state(obs) # new\n\n return obs", "def reset(self):\n # Sample random state from initial state distribution\n self._cur_state = self._sample_state(self._mdp.I)\n self._prev_state = self._cur_state", "def reset(self):\n self.observation = None\n self.episode_done = True", "def _restart_environment_episode(self, force_environment_reset=False) -> 
None:\n raise NotImplementedError(\"\")", "def reset(self):\n\n # Ending variables\n self.time_idle = 0\n self.time_episode = 0\n self.done_time_idle = False\n self.done_falling = False\n self.done_time_episode = False\n\n # hero variables\n self.last_location = None\n self.last_velocity = 0\n\n # Sensor stack\n self.prev_image_0 = None\n self.prev_image_1 = None\n self.prev_image_2 = None\n\n self.last_heading_deviation = 0", "def specific_reset(self) -> None:\n self.old_velocity = 0.\n self.agent.specific_reset()\n max_dist_to_origin = 4.\n min_dist_to_origin = 2\n\n agent_pos = np.random.uniform(-max_dist_to_origin, max_dist_to_origin, 2)\n positioning_done = False\n while not positioning_done:\n agent_pos = np.random.uniform(-max_dist_to_origin,\n max_dist_to_origin, 2)\n if min_dist_to_origin <= np.linalg.norm(agent_pos) <= max_dist_to_origin:\n positioning_done = True\n\n # adjust the height of agent\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # set agent orientation in forward run direction\n y = angle2pos(self.agent.get_position(), np.zeros(3)) + np.pi / 2\n y += self.agent.init_rpy[2]\n quaternion = self.bc.getQuaternionFromEuler([0, 0, y])\n self.agent.set_orientation(quaternion)", "def seed_random():\n random.seed(0)", "def reset(self):\r\n random.seed(1) # Set determanistic play\r\n if self.board is None:\r\n self.setup_board()\r\n self.board.reset()", "def reset(self):\n self._reset_next_step = False\n self.step_count = 0\n \n self._state = self.state_initializer()\n self._meta_state = self._meta_state_initializer()\n self.task.reset(self._state, self._meta_state)\n self.physics.reset(self._state)\n self.action_space.reset(self._state)\n for rule in self.game_rules:\n rule.reset(self._state, self._meta_state)\n rule.step(self._state, self._meta_state)\n \n return dm_env.restart(self.observation())", "def reset(self):\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150)\n p.setTimeStep(self._time_step)\n p.setGravity(0, 0, -9.8)\n\n # load plane\n p.loadURDF(os.path.join(pybullet_data.getDataPath(), \"plane.urdf\"), [0, 0, 0])\n # load robot\n self._darwin = DarwinopEnv()\n\n # Let the world run for a bit\n for _ in range(20):\n p.stepSimulation()", "def reset(self) -> None:\n self._rng = random.default_rng(self.seed)", "def test_env_reset_and_step(self):\n create_env = CreateEnv()\n env = create_env.env\n\n # Assert that the total number of agents matches the sum of the 'n_agents'\n # configuration and the number of planners (1 in this case)\n num_planners = 1\n self.assertEqual(\n len(env.all_agents), create_env.env_config[\"n_agents\"] + num_planners\n )\n\n # Assert that the number of agents created in the world\n # matches the configuration specification\n self.assertEqual(len(env.world.agents), create_env.env_config[\"n_agents\"])\n\n # Assert that the planner's index in the world is 'p'\n self.assertEqual(env.world.planner.idx, \"p\")\n\n obs = env.reset()\n\n # Test whether the observation dictionary keys are created as expected\n self.assertEqual(\n sorted(list(obs.keys())),\n [str(i) for i in range(create_env.env_config[\"n_agents\"])] + [\"p\"],\n )\n\n obs, reward, done, info = env.step({})\n\n # Check that the observation, reward and info keys match\n self.assertEqual(obs.keys(), reward.keys())\n self.assertEqual(obs.keys(), info.keys())\n\n # Assert that __all__ is in done\n assert \"__all__\" in done", "def resetEnv(self):\n obs = self.env.reset()\n self.state = 
torch.tensor(obs, device=self.device, dtype=torch.float).unsqueeze(0)\n return", "def reset(self):\n # Initialize the timestep\n self.timestep = 0\n self.state = self.starting_state\n\n if self.from_data:\n self.episode_num += 1\n\n\n return self.starting_state", "def init_game():\n raise ValueError(\"init_game is removed. Please use env.reset()\")", "def reset(self, blocking=True):\n ret = super(ReacherEnv, self).reset(blocking=blocking)\n self._episode_steps = 0\n return ret", "def reinitialize(self, random_state):\n pass", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, # type: ignore[override]\n *,\n seed: Optional[int] = None,\n options: Optional[Dict[str, Any]] = None,\n ) -> Tuple[DataNested, InfoType]:\n # Reset the seed if requested\n if seed is not None:\n self._initialize_seed(seed)\n\n # Stop the simulator\n self.simulator.stop()\n\n # Remove external forces, if any\n self.simulator.remove_all_forces()\n\n # Make sure the environment is properly setup\n self._setup()\n\n # Make sure the low-level engine has not changed,\n # otherwise some proxies would be corrupted.\n if self.engine is not self.simulator.engine:\n raise RuntimeError(\n \"Changing unexpectedly the memory address of the low-level \"\n \"jiminy engine is an undefined behavior.\")\n\n # Re-initialize some shared memories.\n # It is necessary because the robot may have changed.\n self.sensors_data = OrderedDict(self.robot.sensors_data)\n\n # Enforce the low-level controller.\n # The robot may have changed, for example it could be randomly\n # generated, which would corrupt the old controller. 
As a result, it is\n # necessary to either instantiate a new low-level controller and to\n # re-initialize the existing one by calling `controller.initialize`\n # method BEFORE calling `reset` method because doing otherwise would\n # cause a segfault.\n mock_controller = jiminy.ControllerFunctor()\n mock_controller.initialize(self.robot)\n self.simulator.set_controller(mock_controller)\n\n # Reset the simulator.\n # Do NOT remove all forces since it has already been done before, and\n # because it would make it impossible to register forces in `_setup`.\n self.simulator.reset(remove_all_forces=False)\n\n # Reset some internal buffers\n self.num_steps = 0\n self._num_steps_beyond_terminate = None\n\n # Create a new log file\n if self.debug:\n fd, self.log_path = tempfile.mkstemp(suffix=\".data\")\n os.close(fd)\n\n # Extract the observer/controller update period.\n # The controller update period is used by default for the observer if\n # it was not specify by the user in `_setup`.\n engine_options = self.simulator.engine.get_options()\n self.control_dt = float(\n engine_options['stepper']['controllerUpdatePeriod'])\n if self.observe_dt < 0.0:\n self.observe_dt = self.control_dt\n\n # Run the reset hook if any.\n # Note that the reset hook must be called after `_setup` because it\n # expects that the robot is not going to change anymore at this point.\n # Similarly, the observer and controller update periods must be set.\n reset_hook: Optional[Callable[[], JiminyEnvInterface]] = (\n options or {}).get(\"reset_hook\")\n env: JiminyEnvInterface = self\n if reset_hook is not None:\n assert callable(reset_hook)\n env_derived = reset_hook() or self\n assert env_derived.unwrapped is self\n env = env_derived\n self._env_derived = env\n\n # Instantiate the actual controller\n controller = jiminy.ControllerFunctor(env._controller_handle)\n controller.initialize(self.robot)\n self.simulator.set_controller(controller)\n\n # Configure the maximum number of steps\n self.max_steps = int(self.simulation_duration_max // self.step_dt)\n\n # Register user-specified variables to the telemetry\n for header, value in self._registered_variables.values():\n register_variables(controller, header, value)\n\n # Sample the initial state and reset the low-level engine\n qpos, qvel = self._sample_state()\n if not jiminy.is_position_valid(\n self.simulator.pinocchio_model, qpos):\n raise RuntimeError(\n \"The initial state provided by `_sample_state` is \"\n \"inconsistent with the dimension or types of joints of the \"\n \"model.\")\n\n # Start the engine\n self.simulator.start(\n qpos, qvel, None, self.simulator.use_theoretical_model)\n\n # Initialize shared buffers\n self._initialize_buffers()\n\n # Update shared buffers\n self._refresh_buffers()\n\n # Initialize the observation\n env._observer_handle(\n self.stepper_state.t,\n self.system_state.q,\n self.system_state.v,\n self.robot.sensors_data)\n\n # Initialize specialized most-derived observation clipping operator\n self._get_clipped_env_observation = build_clip(\n env.observation, env.observation_space)\n\n # Make sure the state is valid, otherwise there `refresh_observation`\n # and `_initialize_observation_space` are probably inconsistent.\n try:\n obs: ObsT = cast(ObsT, self._get_clipped_env_observation())\n except (TypeError, ValueError) as e:\n raise RuntimeError(\n \"The observation computed by `refresh_observation` is \"\n \"inconsistent with the observation space defined by \"\n \"`_initialize_observation_space` at initialization.\") from e\n\n # Make 
sure there is no 'nan' value in observation\n for value in tree.flatten(obs):\n if np.isnan(value).any():\n raise RuntimeError(\n f\"'nan' value found in observation ({obs}). Something \"\n \"went wrong with `refresh_observation` method.\")\n\n # The simulation cannot be done before doing a single step.\n if any(self.has_terminated()):\n raise RuntimeError(\n \"The simulation has already terminated at `reset`. Check the \"\n \"implementation of `has_terminated` if overloaded.\")\n\n # Reset cumulative reward\n self.total_reward = 0.0\n\n # Note that the viewer must be reset if available, otherwise it would\n # keep using the old robot model for display, which must be avoided.\n if self.simulator.is_viewer_available:\n self.simulator.viewer._setup(self.robot)\n if self.simulator.viewer.has_gui():\n self.simulator.viewer.refresh()\n\n return obs, deepcopy(self._info)", "def reset(self, **kwargs):\r\n if self.was_real_done:\r\n obs = self.env.reset(**kwargs)\r\n else:\r\n # no-op step to advance from terminal/lost life state\r\n obs, _, _, _ = self.env.step(0)\r\n self.lives = self.env.unwrapped.ale.lives()\r\n return obs", "def reset(self):\n \n self.steps = 0\n if self.episode == 0:\n self.ins = random.uniform(self.mins.values[:4],self.maxes.values[:4])\n #get the corresponding outputs:\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n self.starts = np.append(self.ins, outs)\n\n else:\n self.starts = self.state[:7] #previous episode's end state\n\n #get goals from random inputs:\n viable = False\n while viable == False:\n self.ins = random.uniform((self.mins.values[:4]+(self.mins.values[:4]*self.minmaxbuffer)),self.maxes.values[:4]-(self.maxes.values[:4]*self.minmaxbuffer))\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n \n # Check if viable:\n viable = self.test_viable(outs)\n\n self.goals = outs\n\n # These are your current inputs:\n self.ins = self.starts[:4]\n # State carries the starting points and the goals.\n self.state = np.append(self.starts,self.goals)\n\n #Track episodes and total reward.\n self.episode += 1\n self.tot_rew = 0\n\n return (self.state)", "def reset(self):\n error_estop = \"\"\"\\\nE-Stop is ASSERTED. Disengage E-Stop and then reset the robot.\n\"\"\"\n error_nonfatal = \"\"\"Non-fatal Robot Error on reset.\nRobot reset cleared stopped state and robot can be enabled, but a non-fatal\nerror persists. Check diagnostics or rethink.log for more info.\n\"\"\"\n error_env = \"\"\"Failed to reset robot.\nPlease verify that the ROS_IP or ROS_HOSTNAME environment variables are set\nand resolvable. 
For more information please visit:\nhttp://sdk.rethinkrobotics.com/wiki/RSDK_Shell#Initialize\n\"\"\"\n is_reset = lambda: (self._state.enabled == False and\n self._state.stopped == False and\n self._state.error == False and\n self._state.estop_button == 0 and\n self._state.estop_source == 0)\n pub = rospy.Publisher('robot/set_super_reset', Empty, queue_size=10)\n\n if (self._state.stopped and\n self._state.estop_button == AssemblyState.ESTOP_BUTTON_PRESSED):\n rospy.logfatal(error_estop)\n raise IOError(errno.EREMOTEIO, \"Failed to Reset: E-Stop Engaged\")\n\n rospy.loginfo(\"Resetting robot...\")\n try:\n baxter_dataflow.wait_for(\n test=is_reset,\n timeout=3.0,\n timeout_msg=error_env,\n body=pub.publish\n )\n except OSError as e:\n if e.errno == errno.ETIMEDOUT:\n if self._state.error == True and self._state.stopped == False:\n rospy.logwarn(error_nonfatal)\n return False\n raise", "def reset(self):\n # Reset time counter\n self.t = 0\n\n # Reset randomization\n self.randomization.reset()\n\n # Randomize parameters.\n self.parameters = self.randomization.parameter_randomizer.randomize(\n self.parameters, self._random_state\n )\n\n self._reset()\n\n # Randomize simulation. Because sim is recreated in self._reset(),\n # simulation_randomizer.randomize should be called after the _reset.\n self.randomization.simulation_randomizer.randomize(\n self.mujoco_simulation.mj_sim, self._random_state\n )\n\n # reset observer.\n self.observer.reset()\n\n # Reset multi goal tracker for a new episode.\n self.multi_goal_tracker.reset()\n\n # Reset state of goal generation.\n return self.reset_goal_generation(sync_type=SyncType.RESET)", "def _initialize_episode(self, agent_type='active'):\n initial_observation = self._environment.reset()\n return self._agent.begin_episode(agent_type, initial_observation)", "def reset(self):\n Simulation.reset(self)", "def reset_world():\n __switch_ctrl.call(start_controllers=[],\n stop_controllers=[\"hand_position_trajectory_controller\", \"arm_position_trajectory_controller\", \"joint_state_controller\"],\n strictness=SwitchControllerRequest.BEST_EFFORT)\n __pause_physics.call()\n\n joint_names = ['j1', 'j2', 'j3', 'j4', 'j5', 'flange','H1_F1J1', 'H1_F1J2',\n 'H1_F1J3', 'H1_F2J1', 'H1_F2J2', 'H1_F2J3','H1_F3J1', 'H1_F3J2', 'H1_F3J3']\n joint_positions = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] \n\n __set_model.call(model_name=\"denso\",\n urdf_param_name=\"robot_description\",\n joint_names=joint_names,\n joint_positions=joint_positions)\n\n timer = Timer(0.0, __start_ctrl)\n timer.start()\n\n time.sleep(0.1)\n __unpause_physics.call()\n\n #__reset_world.call()\n spawn_extras()", "def setup_method(cls):\n seed()", "def _reset(self, env_id: np.ndarray) -> None:", "def reset():\n _runtime.reset()", "def reset(\n self,\n seed: int | None = None,\n options: dict | None = None,\n ) -> None:\n raise NotImplementedError", "def setUp(self) -> None:\n self.random = np.random.RandomState(seed=42)", "def reset(self):\n if self._draw_new_turn_on_reset:\n turn_params = self._draw_random_turn_params()\n config = AisleTurnEnvParams(turn_params=turn_params, env_params=self._env_params)\n self._env = AisleTurnEnv(config)\n\n return self._env.reset()", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world data\r\n self.action=[0.0,0.0]\r\n self.grid_state= 
[0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def reset(self,\n *,\n seed: Optional[int] = None,\n options: Optional[dict] = None,\n ) -> Tuple[Observation, dict]:\n super().reset(seed=seed, options=options)\n if options and \"config\" in options:\n self.configure(options[\"config\"])\n self.update_metadata()\n self.define_spaces() # First, to set the controlled vehicle class depending on action space\n self.time = self.steps = 0\n self.done = False\n self._reset()\n self.define_spaces() # Second, to link the obs and actions to the vehicles once the scene is created\n obs = self.observation_type.observe()\n info = self._info(obs, action=self.action_space.sample())\n if self.render_mode == 'human':\n self.render()\n return obs, info", "def reset(self):\n self.num_steps = 0\n self.world_state = self.action = None", "def set_env(self, env):\n\n self.env = env\n self.sim_env = copy.deepcopy(self.env)\n self.sim_env.reset_at_episode_end = False # Avoids expensive re-sampling of jets every time we parse a path\n self.init_episode()", "def reset(self, **kwargs):\n\n # on a reset we set the health back to 120\n self.player_hp = 120\n self.enemy_hp = 120\n\n # reset the environment\n \n observation = self.env.reset(**kwargs)\n\n # we restarted inc the number\n self.num_resets += 1\n\n # the observation\n obs = self.observation(observation)\n self.current_frame_number = 0\n \n # fill up the queue\n for i in range(4):\n self.q.append(obs)\n \n return np.array(list(self.q))", "def reset(self):\n self.success = False\n self.i = 0\n if self.monitor:\n self.env = gym.wrappers.Monitor(self.env, \"./mountaincar-monitor\", force=True)\n state = self.env.reset()\n state = self.preprocess_state(state)\n state = np.concatenate([state] * self.action_repeat)\n return state", "def reset(ctx, with_testdb):\n ctx.invoke(init, with_testdb=with_testdb)\n ctx.invoke(seed)\n\n return None", "def reset(ctx, with_testdb):\n ctx.invoke(init, with_testdb=with_testdb)\n ctx.invoke(seed)\n\n return None", "def reset(self, observation, target_pos, ground_truth):\n if len(self.episode_starts) == 0 or self.episode_starts[-1] is False:\n self.episode_idx += 1\n\n if self.learn_states and (self.episode_idx + 1) % self.learn_every == 0 and self.n_steps <= self.max_steps:\n print(\"Learning a state representation ...\")\n start_time = time.time()\n ok, self.srl_model_path = self.socket_client.waitForSRLModel(self.state_dim)\n print(\"Took {:.2f}s\".format(time.time() - start_time))\n\n self.episode_step = 0\n self.episode_success = False\n self.episode_folder = \"record_{:03d}\".format(self.episode_idx)\n os.makedirs(\"{}/{}\".format(self.data_folder, self.episode_folder), exist_ok=True)\n\n self.episode_starts.append(True)\n self.target_positions.append(target_pos)\n self.ground_truth_states.append(ground_truth)\n self.saveImage(observation)", "def reset(self):\n self.sim.reset()\n # state = np.concatenate([self.sim.pose] * self.action_repeat) \n state = self.sim.pose\n return state", "def set_seed(self, seed: int):\n self.rsimulator.set_seed(seed)\n # Maybe call new game here?", "def reset(self):\r\n pg.event.clear()\r\n self.stop_powerpellet()\r\n if not self.eaten:\r\n self.dot_counter = 0\r\n self.eaten = False\r\n self.eat_ghost = False\r\n self.reset_energizer_flag()\r\n self.last_dir = self.direction = 'l'\r\n self.count_eaten_ghost = 200\r\n self.pos = list(self.start_pos)[:]\r\n self.global_counter = 0\r\n self.hourglass_counter = 0", "def restart(self):\n 
self.set_random_pos('starting')\n self.set_random_pos('finishing')\n self.game_loop()", "def reset():\n Vessel.reset_instances()", "def reset(self,new_tgt_pos=None):\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n if new_tgt_pos is not None:\n self.target_pos = new_tgt_pos \n return state", "def reset_env(\n self, key: chex.PRNGKey, params: EnvParams\n ) -> Tuple[chex.Array, EnvState]:\n # Always start with no stock\n # # By defauly, we start on a random weekday\n # Otherwise, with fixed burn-in, would always\n # count return from same weekday\n weekday = jax.lax.cond(\n params.initial_weekday == -1,\n lambda _: jax.random.randint(key, (), 0, 7, dtype=jnp_int),\n lambda _: params.initial_weekday.astype(jnp_int),\n None,\n )\n\n state = EnvState(\n weekday=weekday,\n stock=self.initial_stock,\n step=0,\n )\n return self.get_obs(state), state", "def reset(self):\n self.cur_step = 0\n self.cur_episode += 1\n self.cur_state = 0 # Always start with fixed state\n self.action_episode_memory.append([])\n self.observation_episode_memory.append([])\n self.reward_episode_memory.append([])\n obs = self._get_obs()\n return obs", "def set_global_seeds(seed):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)", "def reset(self):\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state", "def reset(self):\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state", "def reset(self):\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state", "def reset(self, normalized_state, state):\n # self.episode_starts.append(True)\n self.normalized_states.append(normalized_state)\n self.states.append(np.squeeze(state))", "def reset_env(\n self, key: chex.PRNGKey, params: EnvParams\n ) -> Tuple[chex.Array, EnvState]:\n # Always start with no stock\n # # By defauly, we start on a random weekday\n # Otherwise, with fixed burn-in, would always\n # count return from same weekday\n weekday = jax.lax.cond(\n params.initial_weekday == -1,\n lambda _: jax.random.randint(key, (), 0, 7, dtype=jnp_int),\n lambda _: params.initial_weekday.astype(jnp_int),\n None,\n )\n\n state = EnvState(\n weekday=weekday,\n stock=jnp.zeros(self.max_useful_life - 1, dtype=jnp_int),\n step=0,\n )\n return self.get_obs(state), state", "def reset(self):\n print('call reset()')\n self.cur = 0\n if self.shuffle:\n random.shuffle(self.seq)", "def reset(self):\n self.sim.reset()\n self.takeoff = False\n self.success = False\n state = np.concatenate([self.sim.pose] * self.action_repeat)\n return state", "def reset(self, x=0.2, y=0.2):\n\n # Reset the environment (start a new episode)\n self.y = y\n self.x = x\n self.theta = 90\n self.theta = math.radians(self.theta)\n self.steps = 0\n self.index = 0\n self.farthest = -1\n self.laps = 0\n\n # add noise to position and theta\n # self.x_noise = self.x + np.random.normal(self.mu, self.sigmaxy, 1)\n # self.y_noise = self.y + np.random.normal(self.mu, self.sigmaxy, 1)\n # self.theta_noise = self.theta + np.random.normal(self.mu,\n # self.sigmaangle, 1)\n\n self._distance_next()\n self._calc_delta_theta()\n\n if self.discrete_input:\n # discretize state for the agent to control\n\n discrete_distance, discrete_delta_theta \\\n = self._discretize_agent_state(self.distance, self.delta_theta)\n\n self.agent_state = np.array([discrete_distance,\n discrete_delta_theta])\n\n else:\n # self.agent_state has to be a matrix to be accepted by 
keras\n self.agent_state = np.array([self.distance, self.delta_theta])\n\n # Create state (x,y,theta)\n self.state = [self.x, self.y, self.theta]\n\n return self.state, self.agent_state", "def episode_init(self):\n self.nsteps = 0\n self.action = None\n self.state = None\n self.reward = None\n self.terminal = None\n self.total_reward = 0.0", "def rngreset(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True", "def reset(self):\n self.noise = [0.] * 6\n self.state = [0.0] * 9\n self.q = [0.0, 0.0, 0.0, 1.0]\n self.terminal = False\n self.steps = 0\n # Return current state and error\n return self.observation, self.error", "def reset(self, *args):\n self.state = GameStates.playing\n self.human = evilrps.Player('Human', self.get_player_choice)\n self.ai = evilrps.Player('AI', evilrps.create_ai())\n self.game = evilrps.Game(self.human, self.ai)", "def reset_target(scope) -> None:\n if globals.cw_platform == \"CW303\" or globals.cw_platform == \"CWLITEXMEGA\":\n scope.io.pdic = 'low'\n time.sleep(0.1)\n scope.io.pdic = 'high_z' #XMEGA doesn't like pdic driven high\n time.sleep(0.1) #xmega needs more startup time\n else: \n scope.io.nrst = 'low'\n time.sleep(0.05)\n scope.io.nrst = 'high_z'\n time.sleep(0.05)", "def reset(self):\n\n logger.info('Removed existing OpenMM engine.')\n self._simulation = None", "def reset(self):\n\n self.speed = self.getRandomVelocity()\n self.setX(Configuration.windowWidth / 2)\n self.setY(Configuration.windowHeight / 2)", "def reset(self, **kwargs):\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs", "def reset(self):\r\n self.x = self.initX\r\n self.y = self.initY\r\n self.dir= self.initDir", "def set_global(self):\n random.setstate(self.py)\n np.random.set_state(self.np)\n torch.set_rng_state(self.torch)", "def run(self, seed=None):\n if seed is not None:\n random_seed.set_seed(seed)\n self.reset()", "def reset(self):\n # self state is velocity, observation is velocity\n # self._state = np.random.uniform(0, self.GOAL_VELOCITY, size=(self.num_cars,))\n # print(\"In reset function\", self.initialized)\n if self.initialized:\n traci.close()\n sumoProcess = subprocess.Popen([self.sumoBinary, \"-c\", self.cfgfn, \"--remote-port\", str(self.PORT)], stdout=sys.stdout, stderr=sys.stderr)\n traci.init(self.PORT)\n traci.simulationStep()\n if not self.initialized:\n self.vehIDs = traci.vehicle.getIDList()\n print(\"ID List in reset\", self.vehIDs)\n self.initialized = True\n self._state = np.array([traci.vehicle.getSpeed(vID) for vID in self.controllable])\n observation = np.copy(self._state)\n return observation", "def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0", "def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0", "def prepare_for_use(self):\n self._random_state = check_random_state(self.random_state)\n self._offset = self.offset\n self._x_idx = 0" ]
[ "0.66944516", "0.6647286", "0.65143913", "0.6312656", "0.6290814", "0.62830704", "0.6277763", "0.62062037", "0.61978877", "0.6173854", "0.61685586", "0.61399466", "0.613353", "0.61265284", "0.6108667", "0.6104944", "0.60824627", "0.60824627", "0.6075161", "0.6065723", "0.60425687", "0.6032285", "0.60291636", "0.5983385", "0.59776616", "0.59693563", "0.5967431", "0.59631056", "0.5941566", "0.59100235", "0.59031326", "0.5889999", "0.58571434", "0.5834076", "0.5827408", "0.58233976", "0.5819535", "0.5818656", "0.5816387", "0.58091414", "0.58091414", "0.58091414", "0.58091414", "0.58091414", "0.58091414", "0.5802788", "0.5790347", "0.57829505", "0.57786286", "0.5775214", "0.5759442", "0.57378894", "0.57274944", "0.5725739", "0.5724762", "0.57218033", "0.5715117", "0.5706373", "0.5705369", "0.5670608", "0.5656151", "0.565257", "0.56498444", "0.56152123", "0.5615158", "0.56119424", "0.56119424", "0.56109786", "0.5604719", "0.56042117", "0.55915916", "0.5576077", "0.55697566", "0.5541169", "0.55348796", "0.5534629", "0.55342895", "0.55336565", "0.55336565", "0.55336565", "0.5519929", "0.5498642", "0.5496687", "0.5492959", "0.54808027", "0.5479382", "0.5471577", "0.5469426", "0.54681575", "0.54511786", "0.5443704", "0.54357016", "0.54257953", "0.54227257", "0.5421158", "0.5420466", "0.5412168", "0.541078", "0.541078", "0.541078" ]
0.6798604
0
Render the environment server-side
def render(self, mode="human", close=False):
    if close and self._viewer is None:
        if self._viewer is not None:
            self._viewer.close()
            self._viewer = None
        return

    screen_width = 600
    screen_height = 600

    if self._viewer is None:
        from gym.envs.classic_control import rendering
        self._viewer = rendering.Viewer(screen_width, screen_height)

        # generate the grid
        xs, self._xstep = np.linspace(
            0, screen_width, self._width + 1, retstep=True)
        ys, self._ystep = np.linspace(
            0, screen_height, self._height + 1, retstep=True)

        # render the grid
        for x in xrange(self._width):
            for y in xrange(self._height):
                l, r, t, b = (0, self._xstep, self._ystep, 0)
                tile = rendering.FilledPolygon([
                    (l, b), (l, t), (r, t), (r, b)])
                tile.add_attr(rendering.Transform(translation=(
                    x * self._xstep, y * self._ystep)))
                tile.set_color(*CASE_COLORS[chr(self._grid[x, y])])
                self._viewer.add_geom(tile)

        # render starting point
        l, r, t, b = (0, self._xstep, self._ystep, 0)
        tile = rendering.FilledPolygon([
            (l, b), (l, t), (r, t), (r, b)])
        tile.add_attr(rendering.Transform(translation=(
            self._trajectory[0][0] * self._xstep,
            self._trajectory[0][1] * self._ystep)))
        tile.set_color(0, 1.0, 1.0)
        self._viewer.add_geom(tile)

        # render grid lines
        for x in xs[1:len(xs) - 1]:  # not including the first and last one
            line = rendering.Line((x, 0), (x, screen_height))
            self._viewer.add_geom(line)
        for y in ys[1: len(ys) - 1]:
            line = rendering.Line((0, y), (screen_width, y))
            self._viewer.add_geom(line)

        agent = rendering.make_circle(
            radius=min(
                screen_width / (self._width + 1) / 3,
                screen_height / (self._height + 1) / 3),
            res=30)
        self._agentTrans = rendering.Transform(translation=(
            self._currentPos[0] * self._xstep + (self._xstep / 2),
            self._currentPos[1] * self._ystep + (self._ystep / 2)))
        agent.add_attr(self._agentTrans)
        self._viewer.add_geom(agent)

    self._renderTrajectory()

    self._agentTrans.set_translation(
        self._currentPos[0] * self._xstep + (self._xstep / 2),
        self._currentPos[1] * self._ystep + (self._ystep / 2))

    self._viewer.render(return_rgb_array=(mode == 'rgb_array'))

    if close:
        if self._viewer is not None:
            self._viewer.close()
            self._viewer = None
        return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render(self):\n self.env.render()", "def render(self):\n self.rendering = True\n self.env.render()", "def render(self, mode='human'):\n return self._env.render(mode)", "def _serve_environment(self, request):\n return http_util.Respond(\n request,\n {\n 'data_location': self._logdir or self._db_uri,\n 'window_title': self._window_title,\n },\n 'application/json')", "def serve(self) -> str:\n return self._render()", "def render(self, mode='human'):\n\n if self.RENDER_ENV_ONLY:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=np.array([120, 120, 120])/255.0)\n bezel = 10\n \n self._env_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n self._agent_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n if (self.RENDER_INDIV_MEMORY == True and self.INDIV_MEMORY == \"fog\") or (self.RENDER_TEAM_MEMORY == True and self.TEAM_MEMORY == \"fog\"):\n SCREEN_W = 1200\n SCREEN_H = 600\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n \n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n\n self._env_render(self._static_map,\n [7, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [7+1.49*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_red_render,\n [7+1.49*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [7, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # ind blue agent memory rendering\n for num_blue, blue_agent in enumerate(self._team_blue):\n if num_blue < 2:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+num_blue*SCREEN_H//4, 7], [SCREEN_H//4-10, SCREEN_H//4-10])\n else:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+(num_blue-2)*SCREEN_H//4, 7+SCREEN_H//4], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n # ind red agent memory rendering\n for num_red, red_agent in enumerate(self._team_red):\n if num_red < 2:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(red_agent.get_obs(self),\n [900+num_red*SCREEN_H//4, 7+1.49*SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n \n else:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(red_agent.get_obs(self),\n [900+(num_red-2)*SCREEN_H//4, 7+SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n if self.TEAM_MEMORY == \"fog\" and self.RENDER_TEAM_MEMORY == True:\n # blue team memory rendering\n blue_visited = np.copy(self._static_map)\n blue_visited[self.blue_memory] = UNKNOWN\n self._env_render(blue_visited,\n [7+2.98*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # red team memory rendering \n red_visited = 
np.copy(self._static_map)\n red_visited[self.red_memory] = UNKNOWN\n self._env_render(red_visited,\n [7+2.98*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n else:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n \n self._env_render(self._static_map,\n [5, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10], self._team_blue)\n self._env_render(self.get_obs_red_render,\n [5+SCREEN_W//2, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n\n if self.SILENCE_RENDER:\n return self.viewer.get_array()\n else:\n return self.viewer.render(return_rgb_array = mode=='rgb_array')", "def serve_vue_app():\n return(render_template('index.html'))", "def render(self):\n self.env.render()\n #input(\"Press enter to take a step \")", "def display():\n\n #still needs some cleanup on imagry and what the site is about. \n\n return render_template(\"index.html\")", "def main():\n return render_template('index.html')", "def main():\n return render_template('index.html')", "def entry_point():\n return render_template(\"index.html\")", "def run():\n return render_template('index.html')", "def server(request):\n return render_to_response(\n 'server/index.html', {\n 'user_url': getViewURL(request, idPage),\n 'server_xrds_url': getViewURL(request, idpXrds),\n },\n context_instance=RequestContext(request))", "def browser():\n return flask.render_template('browser.html')", "def frontend(request: HttpRequest) -> HttpResponse:\n return render(request, \"frontend/base.html\", {})", "def render(env_type):\n _setup_env()\n\n # Activate local virtual environment (for render_templates+flask?)\n local('. 
%s' % env.activate_path)\n\n if not os.path.exists(env.s3cmd_cfg):\n abort(\"Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.\")\n\n if not env_type in _config['deploy']:\n abort('Could not find \"%s\" in \"deploy\" in config file' % env_type)\n\n if not \"bucket\" in _config['deploy'][env_type]:\n abort('Could not find \"bucket\" in deploy.%s\" in config file' % env_type)\n\n if 'usemin_context' in _config['deploy'][env_type]:\n usemin_context = _config['deploy'][env_type]['usemin_context']\n else:\n usemin_context = None\n\n template_path = join(_config['project_path'], 'website', 'templates')\n deploy_path = join(_config['project_path'], 'build', 'website')\n\n clean(deploy_path)\n\n # Render templates and run usemin\n if 'deploy_context' in _config['deploy'][env_type]:\n deploy_context = _config['deploy'][env_type]['deploy_context']\n else:\n deploy_context = {}\n\n # Sometimes we need this path append to import app from website\n # in render_templates, dunno why:\n sys.path.append(_config['project_path'])\n\n static.render_templates(template_path, deploy_path, deploy_context)\n static.usemin(_config, [deploy_path], usemin_context)\n\n # Copy static files\n static.copy(_config, [{\n \"src\": join(_config['project_path'], 'website', 'static'),\n \"dst\": join(deploy_path, 'static')\n }])\n\n # Additional copy?\n if 'copy' in _config['deploy'][env_type]:\n static.copy(_config, _config['deploy'][env_type]['copy'])", "def main():\n return render_template(\"main.html\")", "def main():\n return render_template(\"main.html\")", "def home(environ):\n return render_template(template_name=\"index.html\", context={})", "def main():\r\n return render_template(\"UI.html\")", "def render(trin, config, args):\n\n from forge.embyr.twistedserver import Application\n sword = trin.sword.remote(trin, config, args, idx=0)\n env = sword.getEnv.remote()\n Application(env, sword.tick.remote)", "def dashboard_render(self,servers):\n THIS_DIR = os.path.dirname(os.path.abspath(__file__))\n j2_env = Environment(loader=FileSystemLoader(THIS_DIR),\n trim_blocks=True)\n new_dashboard = (j2_env.get_template('templating_dashboard.json').render(\n list_templating=self.templating(servers)\n ))\n return (new_dashboard)", "def get(self):\n self.render(\"index.html\")", "def render_env(self):\n return {\n jinja2.Template(k).render({self.name: self}):\n jinja2.Template(v).render({self.name: self})\n for k, v in self.env.items()\n } if self.env else self.env", "def index():\n return render_template('home.jinja2')", "def main():\n\n return render_template(\"index.html\", title=\"Home\", heading=\"Dublin Bus\")", "def get(self):\n template = Constants.JINJA_ENVIRONMENT.get_template('homepage.html')\n self.response.headers['Content-Type'] = 'text/html'\n self.response.write(template.render())", "def get(self):\n self.render(\n \"index.html\",\n )", "def index():\n return render_template(\"main.html\")", "def get(self):\n # this trick allows us to return the correct image url, even in docker\n global _host\n _host = self.request.host\n _logger.debug(\"setting hostname to {0}\".format(_host))\n env = Environment(loader=FileSystemLoader('templates'))\n template = env.get_template('embed.html')\n script = server_document('http://{0}/bkapp'.format(_host))\n self.set_status(200)\n self.write(template.render(script=script, template=\"Tornado\"))\n self.finish()", "def get(self):\n self.render('index.html')\n return", "def test():\n\n print \"SERVER IS RUNNING\"\n\n\n return render_template(\"statemap.html\")\n # return 
render_template(\"testworld.html\")", "def index():\n return render_template('main.html')", "def index():\n return render_template('main.html')", "def index():\n global realsense_enabled\n return render_template('index.html')", "def index(self):\n return render_template(\"{0}/index.html\".format(self.__APP_DIR__))", "def Home():\n resp = render_template('index.html')\n return resp", "def root():\n return render_template('index.html')", "def root():\n return render_template('index.html')", "def root():\n return render_template('index.html')", "def start(response, input={}):\n try: host = socket.gethostname()\n except AttributeError:\n if os.environ.get('HTTP_HOST'): host = os.environ['HTTP_HOST']\n else: host = os.environ['SERVER_NAME']\n template = LazyDict({'version': getversion(), 'host': host, 'color': Config().color or \"#C54848\"})\n if input: template.update(input)\n temp = os.path.join(os.getcwd(), 'templates/console.html')\n outstr = template.render(temp)\n response.out.write(outstr)", "def main():\n access_token = get_access_token()\n\n return render_template('index.html', ACCESS_TOKEN=access_token)", "def home():\n stocks = preprocess()\n\n return render_template(\"main.html\",stocks=stocks)", "def createBasicRenderSetup():\n\n pass", "def index():\n with app.app_context():\n return render_template(\"public/index.html\")", "def get_root():\r\n return render_template(\"index.html\"), 200", "def get(self):\n self.render('view.html')", "def front_page():\n vars = dict(request.args)\n vars.setdefault('output', vars.get('format'))\n\n key = vars.get('auth_entity')\n if key:\n vars['entity'] = ndb.Key(urlsafe=key).get()\n if vars['entity']:\n vars.setdefault('site', vars['entity'].site_name().lower())\n\n vars.update({\n silo + '_html': module.Start.button_html(\n f'/{silo}/start_auth',\n image_prefix='/oauth_dropins_static/',\n outer_classes='col-lg-2 col-sm-4 col-xs-6',\n scopes=SCOPE_OVERRIDES.get(silo, ''),\n )\n for silo, module in OAUTHS.items()})\n\n return render_template('index.html', **vars)", "def index():\n return render_template('index.html', title='PanOS Bootstrap Utility')", "def setupRender():\n prefs = getPreferences()\n\n # Check of the built-in environment maps path can be located.\n # Discontinue if it cannot be found.\n envPath = prefs.path_value\n if not envPath:\n return {'WARNING'}, \"No environment images path defined\"\n\n # Discontinue if there is no output path defined.\n renderPath = outputPath()\n if not renderPath:\n return {'WARNING'}, \"The scene needs to be saved before rendering\"\n\n if prefs.image_value == 'NONE':\n return {'WARNING'}, \"No environment image defined\"\n\n setRenderSettings(os.path.join(renderPath, IMAGE_NAME))\n createCamera()\n createWorld(envPath)\n return renderPath", "def hello():\n\n return render_template(\"index.html\")", "def run(self) -> None:\n self._render()\n print(self.sio.getvalue())", "def index():\n print('This is the root of the app, should have something better')\n return 'Root, this is where some front end would go on a server'", "def index():\r\n return render_template('index.html')", "def partaj_context(request):\n frontend_context = {\n \"assets\": {\"icons\": static(\"core/icons.svg\")},\n \"crisp_website_id\": settings.CRISP_WEBSITE_ID,\n \"csrftoken\": get_token(request),\n \"environment\": settings.ENVIRONMENT,\n \"url_admin\": reverse(\"admin:index\"),\n \"url_logout\": reverse(\"cas_ng_logout\"),\n }\n\n if settings.SENTRY_DSN:\n frontend_context[\"sentry_dsn\"] = settings.SENTRY_DSN\n\n if 
request.user.is_authenticated:\n frontend_context[\"token\"] = str(\n Token.objects.get_or_create(user=request.user)[0]\n )\n\n return {\"FRONTEND_CONTEXT\": json.dumps(frontend_context)}", "def get(self):\n return render_template(\"index.html\")", "def render_home():\r\n\treturn render_template(\"index.html\")", "def root():\n return render_template('root.html')", "def main_page():\n return render_template(\"index.html\")", "def homepage(): \n return render_template('home/index.html',title='Welcome to SYCLIQ')\n #return render_template_string(\"Welcome to SYCLIQ\")", "def show_homepage():\n\n return render_template(\"blank-slate.html\")", "def index():\n # Render template\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def get(self):\n self.response.write(view_utils.render('base.html', {}))", "def render_scheudle_view(path):\n client_build_dir = 'client/build'\n if path != \"\" and os.path.exists(os.path.join(client_build_dir, path)):\n return flask.send_from_directory(client_build_dir, path)\n else:\n return flask.send_from_directory(client_build_dir, 'index.html')", "def renderPage():\n return render_template(\"index.html\")", "def index(request):\n return render_to_response(\n # note: this is slightly different than the labs app with \"app/app.html\" rather than the labs/labs.html\n # and we don't pass submodule name. fixme, by changing to new style with name = app_name\n settings.JS_HOME+'app.html',\n {'INDIVO_UI_APP_CSS': settings.INDIVO_UI_SERVER_BASE+'/jmvc/ui/resources/css/ui.css'}\n )", "def angular_main_page():\n print(session)\n return render_template(\"/angular_main_page.html\")", "def root():\n return flask.render_template('index.html')", "def landing():\n return render_template('index.html', token=webview.token)", "def landing():\n return render_template('index.html', token=webview.token)", "def show(self):\n\t\tself.html += '<head>\\n' + self.head + '</head>\\n<body>\\n' + self.body + '</body>\\n</html>'\n\n\t\treturn self.html", "def home():\n return render_template(\"d3_graph.html\")", "def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/index.html',\n {\n 'title':'Aplicacion para transpasar datos del sensor FreeStyle Libre de MongoDB a MySql',\n 'year':datetime.now().year,\n }\n )", "def renderHTTP(ctx):", "def __call__(self, req, res):\n if not hasattr(res, 'render'):\n res.render = Renderer(res)\n res.locals = {}\n res.render.add_engine(self)", "def render(self, mode='human'):\n\n pass", "def view_landing_page():\n return render_template(\"index.html\")", "def index():\n \n \n return render_template('index.html')", "def run_html():\n if __name__ != \"__main__\":\n app.run(debug=True)", "def index():\n return render_template('index.html'), 200", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def RenderEnv(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def landing_page():\n\n return render_template('index.html')", "def index(self):\n\t\treturn render_template('index.html')", "def index():\r\n return render_template('index.html')", "def index():\r\n return render_template('index.html')", "def index():\n\n\treturn(render_template('index.html'))", "def index():\n #Getting source\n sources = get_sources()\n print(sources)\n return render_template('index.html', sources = sources)", "def 
render(self, mode='human'):\n pass # no use in this situation", "def welcome():\n return render_template(\"NFL.html\")", "def index(self):\n return render_template('main/index.html')", "def render(self, mode='human'):", "def home():\n return render_template('main.html')", "def index():\n\n return render_template(\"index.html\"), 200", "def index() -> Any:\n return render_template(\"index.html\")", "def setup(self):\n print(\"Inside setup\")\n self.fetch_from_db()\n # Don't assign to the function output. Assing it to the function object so that whenever we\n # do some changes to layout, it will reflect without server restarting\n app.layout = self.get_root_layout\n return app.server", "def index(self, **kw):\n\n template = self.context\n request = self.request\n\n request.response.setHeader('content-type',\n template.content_type)\n\n return template.render(request, **kw)", "def home():\n return render_template(\"index.html\")" ]
[ "0.8079139", "0.7380633", "0.6701152", "0.6639087", "0.6605521", "0.6443869", "0.6436322", "0.64339954", "0.6354514", "0.6319063", "0.6319063", "0.6318727", "0.62650275", "0.6256661", "0.62470657", "0.62287277", "0.6225323", "0.6188681", "0.6188681", "0.61411625", "0.61068594", "0.6104807", "0.61025804", "0.60988903", "0.60519594", "0.605076", "0.60223687", "0.5994819", "0.5989986", "0.5964126", "0.5957838", "0.5945889", "0.5944655", "0.59420615", "0.59420615", "0.59416944", "0.5935708", "0.5933474", "0.5927994", "0.5927994", "0.5927994", "0.59087896", "0.5899255", "0.58943367", "0.58940905", "0.5889614", "0.58847076", "0.5883214", "0.5879425", "0.58728373", "0.5866742", "0.58397454", "0.5824557", "0.58178014", "0.58149904", "0.58133686", "0.58109725", "0.5803976", "0.5802057", "0.58009887", "0.57972354", "0.57959616", "0.5791541", "0.5788467", "0.5788101", "0.57783103", "0.5773926", "0.5773059", "0.57724565", "0.5769796", "0.5769735", "0.5769735", "0.5763292", "0.57632613", "0.57607055", "0.5756857", "0.57547396", "0.5752402", "0.5745306", "0.57429206", "0.57386535", "0.57364124", "0.5730737", "0.5730737", "0.5730135", "0.57297814", "0.5720903", "0.57122034", "0.57122034", "0.57095206", "0.5704845", "0.57030207", "0.5700497", "0.5699913", "0.56917065", "0.5690366", "0.5688967", "0.56873983", "0.5686322", "0.56816626", "0.5675159" ]
0.0
-1
Function with no arguments that displays a labyrinth
def afficher_carte(self):
    print(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simple():", "def simple():", "def sth():", "def saluda1(sujeto):\n print 'Hola '+sujeto+' !!'", "def plothub1():\r\n pass", "def saluda2(sujeto):\n print 'Hola %s !!' % sujeto", "def func():", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def getLabel2(*args):", "def getLabel2(*args):", "def piku():\n pass", "def apply():\r\n result = dataSampling(str, \"hhhhhhahhhhhahhahahahahhahahha\", 5)\r\n final_res = dataScreening(result, \"ha\")\r\n print(final_res)", "def example_function():", "def fn():", "def realsense():\n pass", "def print_result(label, result):\n print(f\"{label} {result}\\n\")", "def g():", "def print_result(result, label):\n print(\"\\n\", label)\n print(\"\\n\", result)\n # your code", "def mezclar_bolsa(self):", "def show_data():", "def disp_score():", "def show(*args):", "def show(*args):", "def show(*args):", "def show(*args):", "def falcon():", "def funca(la, lb):\n print(\"la =\", la)\n print(\"lb =\", lb)", "def subtractor(a, b): \n print(\"I'm a function. My name is {}\".format(subtractor.__name__))\n print(\"I'm about to subtract {} and {}\\n\\n\".format(a,b))\n return a - b # i output a value by using the return statement", "def demo():\n ...", "def verteileKarten(anzahlSpieler):\n pass", "def result_display(self, arg):\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)", "def GetHelix(helix):\r\n pass", "def print_out():\n pass", "def show_func(self, x):\n\n if (self._flag == 1):\n g = self.modelfun(x, *self._gf)\n elif (self._flag == 2):\n g = self.modelfun1(x, *self._gf)\n elif ((self._flag == 0) & (self._load != '0')):\n pass\n else:\n # pass\n sys.exit(\"Wrong flag in do_fit\")\n\n return g", "def sample(self):", "def exo2():", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def call(self):", "def feat():\n pass", "def Label(self) -> str:", "def calculate_output(self):", "def test_transE_display():\n testing_function('transe', display=True)", "def show_result():\n print(\"I win!!\")", "def lro(self) -> global___Snippet.Lro:", "def healthcare():", "def exercise_b2_70():\r\n pass", "def display_message():", "def CL(self):", "def firstFunction(self):", "def function(self):\n raise NotImplementedError", "def substantiate():", "def main(_):\n\tlabel_wav()", "def disp(self, modulo=None):\r\n raise NotImplementedError('method disp() is not implemented')", "def trace(self,p):\n n = self\n c=0 \n while n!=None :\n print (n)\n n = n.pere\n c+=1\n print (\"Nombre d'étapes de la solution:\", c-1)\n return", "def feature():\n pass", "def showp():\n def show1(i):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.offsetAz' % (i+1) ,qmax_)\n e=SAC.queryDouble('carma.Ovro%d.Drive.Point.offsetEl' % (i+1) ,qmax_)\n return (a,e)\n print ' ant dAz dEl'\n for i in range(6):\n (a,e) = show1(i)\n print ' 00%d %7.3f %7.3f' % (i+1,a,e)", "def used_tex_func(val):\n return None", "def fn(): # fn definition # help2\r\n print(my_text)", "def main():\n printfunc(calc(menu()))", "def exercise_b2_69():\r\n pass", "def main():\n\n # on affiche la liste des cartes disponibles\n liste_des_cartes = functions.liste_cartes() + functions.liste_sauvegardes()\n functions.afficher_liste(liste_des_cartes)\n\n # selection d'une carte, un retour \"None\" indique une mauvaise saisie\n while True:\n choix = functions.choix_carte(\n input('''Indiquez le numéro de la carte choisie.\n Attention, si vous choisissez une nouvelle partie, la sauvegarde associée\n à la carte concernée sera remplacée. 
\\n'''), liste_des_cartes)\n if choix is not None:\n break\n\n # la carte est choisie, on peut générer un Labyrinthe\n laby = Labyrinthe(choix)\n # on affiche le tracé du labyrinthe\n print(laby.carte)\n\n # on lance la boucle du jeu\n while True:\n deplacements = input(\"\"\"Dans quelle direction voulez vous aller?\n \"E\" pour aller vers l'est, \"N\" pour aller vers le nord\n \"S\" pour aller vers le sud, \"O\" pour aller vers l'ouest\n Suivi d'un nombre (optionnel) pour le nombre de cases à parcourir\n \"Q\" pour sauvegarder et quitter\n \"\"\")\n # on vérifie que les données entrées par l'utilisateur sont valides\n instructions = functions.instructions_valide(deplacements)\n if instructions is not None:\n\n if instructions == \"quitter\":\n laby.sauvegarder_partie()\n break\n if instructions == \"lettre non valide\":\n print(\"La lettre entrée n'est pas valide \\n\")\n continue\n if instructions == \"non valide\":\n print(\"Les données entrées ne sont pas valides \\n\")\n continue\n else:\n # on vérifie si la partie est toujours active\n partie_en_cours = laby.effectuer_deplacements(instructions)\n if not partie_en_cours:\n # en cas de sortie trouvée, on supprime la sauvegarde\n laby.supprimer_partie()\n print(\"Partie terminée, sauvegarde supprimée\")\n break\n\n # On met en pause le système (Windows)\n os.system(\"pause\")", "def dummy_fn(self):\n\t\tpass", "def _regr_basic():", "def handle_output(self, workunit, label, s):\r\n pass", "def handle_output(self, workunit, label, s):\r\n pass", "def note():", "def ti_func_doc(self, label):\n latex = (\n r'\\begin{split}' + '\\n'\n r'0 = & ti - LHV_\\mathrm{fuel} \\cdot \\left[\\sum_i \\left('\n r'\\dot{m}_{\\mathrm{in,}i}\\cdot x_{\\mathrm{fuel,in,}i}\\right)-'\n r' \\dot{m}_\\mathrm{out,1}\\cdot '\n r'x_{\\mathrm{fuel,out,1}} \\right]\\\\' + '\\n'\n r'& \\forall i \\in \\text{combustion inlets}\\\\' + '\\n'\n r'\\end{split}'\n )\n return generate_latex_eq(self, latex, label)", "def view(self):\r\n\t\t\r\n\t\t# add zero term\r\n\t\tt = self\r\n\t\tif t == []:\r\n\t\t\tt = [Te(0)]\r\n\t\t\r\n\t\t# display\r\n\t\tfor i in t:\r\n\t\t\ti.view()\r\n\t\t\t\r\n\t\t# spacer\r\n\t\tprint(' ')\r\n\t\t\t\r\n\t\treturn None", "def result(self):", "def result(self):", "def goToZero():\n #on remet tout à zero\n usr_choice = 0\n fonctions_pfc.x = 0\n fonctions_pfc.y = 0\n fonctions_pfc.result = \"\"\n fonctions_pfc.pc_score = \" SCORE DU PC : \"\n fonctions_pfc.usr_score = \" SCORE DU PC : \"\n #on ré-affiche tous les composants\n display_pack_component()", "def spiel(self, *args, **kwargs):\n\t\t\n if kwargs.get('h'):\n print(\"\\nSpiel ( FussballToto, 7 Mannschaften )\\n\")\n print(\"Aufruf t . 
spiel( /[ tipp ] /[, m ] )\\n\")\n print(\" t Toto-Objekt\")\n print(\" tipp abgegebener Tipp (Liste mit 7 Zahlen aus {0, 1, 2})\")\t\t\t\n print(\" fehlt die Angabe, wird ein zufälliger Tipp angenom-\")\n print(\" men\")\t\t\t\n print(\" m Anzahl Spiele (mit dem gleichen Tipp); Standard=1\\n\")\n print(\"Zusatz z=ja Es wird ein zufälliger Tipp angenommen\")\n print(\" (ein eventuell angegebener Tipp wird nicht beachtet)\")\n print(\" d=ja Bei Angabe von m > 1 Rückgabe einer DatenReihe\")\n print(\" mit der Anzahl Richtige je Spiel\\n\")\t\t\t\n return\t\t\t\n\t \n def zuf_tipp()\t :\n return zuf_zahl((0, 2), 7)\t\t\n\t \n zufall = False\t \n if len(args) not in (0, 1, 2):\n print('zufall: 0 bis 2 Argumente angeben')\n return\n if len(args) == 0:\n m = 1\n zufall = True\t\t\t\n elif len(args) == 1:\n m = args[0]\n if isinstance(m, (int, Integer)):\t\n zufall = True\t\t\t\n elif isinstance(m, (list, tuple)):\n tipp = m\n m = 1\n else:\t\t\t\t\n print('zufall: positive ganze Zahl oder Tipp-Liste angeben')\n elif len(args) == 2:\n tipp, m = args\t\n\n if m <= 0:\n print('zufall: Anzahl der Spiele >= 1 angeben')\n return\t\n\n if kwargs.get('z'):\n zufall = True\n\n if zufall:\n tipp = zuf_tipp()\n\t\t\t\n if len(tipp) != 7 or not all([x in (0,1,2) for x in tipp]):\t\t\t\n print('zufall: Tipp als Liste von 7 Zahlen aus (0,1,2) angeben')\n return\t\t\t\n\t\t\t\n ergebnisse = zuf_tipp()\t\t\t\n\n def dm(x):\n return display(Math(x))\t\t\t\t\n\n print(' ')\n\t\t\t\n if m == 1:\n if not zufall:\t\t\n dm('\\\\text{Tipp}' + '\\\\quad\\\\quad \\,:' + latex(tipp))\n else:\n dm('\\\\text{Tipp}' + '\\\\quad\\\\quad :\\;' + latex(tipp) + '\\;\\;\\\\text{(zufällig)}')\n dm('\\\\text{Ergebnisse :} \\;' + latex(ergebnisse))\t\n erg = 0\t\t\t\n for i, t in enumerate(tipp):\n if t == ergebnisse[i]:\n erg += 1\n if erg == 1:\t\t\t\t\t\n dm(latex(erg) + '\\;\\\\text{Richtiger}')\t\n else:\t\t\t\t\t\n dm(latex(erg) + '\\;\\\\text{Richtige}')\t\n else:\t\t\t\n summen = {}\n dr = []\t\t\t\n for i in range(m):\n erg = zuf_tipp()\n sum = 0\t\t\t\t\n for j, t in enumerate(tipp):\n if t == erg[j]:\n sum += 1\n if kwargs.get('d'):\n dr += [sum]\t\t\t\t\t\t\n try:\t\t\t\t\t\t\n summen[sum] += 1\n except KeyError:\n summen[sum] = 1\n dm(latex(m) + '\\; \\\\text{Spiele}')\n dm('\\\\text{Spielergebnisse (Richtige : Anzahl Spiele, relative Häufigkeit, \\\n \t\t\ttheoretische}') \n dm('\\\\text{Wahrscheinlichkeit)}')\t\t\t\n u = Urne([0, 1, 2], 7, f=anzahl(1), info=False)\t\t\t\n for s in summen:\n sum = summen[s]\t\t\n dm(latex(s) + '\\\\text{ : } \\\\quad' + latex(sum) + '\\\\quad' + \\\n\t\t\t\t latex(format(float(sum/m), '.4f')) + '\\\\quad ' + \\\n\t\t\t\t latex(format(float(u.P(s)), '.4f')) + '=' + latex(u.P(s)))\n print(' ') \n if kwargs.get('d'):\n print('Rückgabe einer DatenReihe (Anzahl Richtige je Spiel)')\t\t\n return DatenReihe(dr)", "def display_problem():\n return \"\\nFind the difference between the sum of the squares of the first one hundred natural numbers and the \" \\\n \"square of the sum.\\n\"", "def output(self):\r\n self.logic ( )\r\n return self.output", "def test(): # TO BE DELETED WHEN PROGRAM COMPLETED\n print('methode test')", "def exercise2(self, param1, param2):\n print param1 / param2\n return", "def sample(self, x):", "def showm():\n def show1(i):\n coeff=[]\n for m in range(5):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.Constants.m%d' % (i+1,m+1) ,qmax_)\n coeff.append(a)\n for o in range(3):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.Constants.o%d' % (i+1,o+1) ,qmax_)\n 
coeff.append(a)\n return coeff\n print ' ant m1 m2 m3 m4 m5 o1 o2 o3'\n for i in range(6):\n m = show1(i)\n print ' 00%d %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % (i+1,m[0],m[1],m[2],m[3],m[4],m[5],m[6],m[7])", "def nr():\n pass", "def subtractor(a,b): \n return a-b # i output a value by using the return statement", "def func_case(self):\n test.success(\"\")", "def display_message():\n\tprint(\"In this chapter we will be learning how to write functions\")", "def describe():", "def display4(*args):\n #-------------------- unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- beta x,y & dispersion x\n s = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)] # Abszisse\n bx = [twiss_func(i,'bx') for i in range(twiss_func.nbpoints)] # beta x\n by = [twiss_func(i,'by') for i in range(twiss_func.nbpoints)] # beta y\n dx = [twiss_func(i,'dx') for i in range(twiss_func.nbpoints)] # dispersion x\n#-------------------- longitudinal trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- beta functions\n splot211=plt.subplot(211)\n splot211.set_title('beta x,y')\n # mapping box\n splot211.text(0.01, 1.1, UTIL.FLAGS.get('mapping'),transform=splot211.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n # function plots\n plt.plot(s,bx, label=r\"$\\beta$x [m]\", color='black', linestyle='-')\n plt.plot(s,by, label=r\"$\\beta$y [m]\", color='red', linestyle='-')\n plt.plot(s,dx, label=r'$\\eta_x$ [m]' , color='green', linestyle='-') # dispersion x\n vscale=splot211.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # zero line\n splot211.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(212)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('synchrotron oscillation')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z2,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower 
right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')", "def fn(self):\n return \"Hello\"", "def input(self):", "def printme(parameters): # create a function called printme using def\n print(parameters) # prints anything that is stored inside parameters\n return", "def echo(self, foetus):\n Allele_semblable = 0\n for Allele in range(3):\n if self.allele[Allele] in foetus.allele and self.allele[Allele] != 0.0:\n Allele_semblable = Allele\n if Allele_semblable == 0:\n Allele_Echo = self.allele[Allele_semblable + 1]\n for Alleles_foetus in range(3):\n if foetus.allele[Alleles_foetus] - 1 == Allele_Echo:\n foetus.informatif = 3\n elif Allele_semblable == 1:\n Allele_Echo = self.allele[Allele_semblable - 1]\n for Alleles_foetus in range(3):\n if foetus.allele[Alleles_foetus] - 1 == Allele_Echo:\n foetus.informatif = 3", "def default_display_function(feature):\n # n_samples = min(n_samples, feature.shape[0])\n IPython.display.display(widgets.Box(layout=widgets.Layout(height=\"2.5%\")))\n IPython.display.display(feature)\n IPython.display.display(widgets.Box(layout=widgets.Layout(height=\"2.5%\")))", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def display(self):\n print(\"{}, {}\".format(self.label, self.params))", "def introduction_function(self):\n pass", "def printfunc(self):\n zero1=self.Newton(True)\n print \"Using initial porition %0.2f ,%0.2f\" %(self.x_init,self.y_0)\n print \"extremum calculated witn Newton-Rapson: %0.2f ,%0.2f.\"%(zero1[0],zero1[1])\n zero2=self.Newton(False)\n print \"extremum calculated witn Secant: %0.2f ,%0.2f.\" %(zero2[0],zero2[1])\n xlist=np.arange(self.x_0-10,self.x_0+10,0.01)\n ylist=np.arange(self.y_0-10,self.y_0+10,0.01)\n X,Y=np.meshgrid(xlist,ylist)\n Z=self.sfunc(X,Y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n ax.plot(xlist, ylist, self.sfunc(xlist,ylist), 'g-',label='function $e^{(-(x-%0.2f)^2-(y-%0.2f)^2)}$' %(self.x_0,self.y_0))\n ax.contour(X, Y, Z)# colors = 'k', linestyles = 'solid')\n ax.plot([zero1[0]], [zero1[0]], self.sfunc(zero1[0],zero1[1]),'bo',label='extrema using Newton-Rapson (%0.2f; %0.2f)'%(zero1[0],zero1[1]))\n ax.plot([zero2[0]], [zero2[0]], self.sfunc(zero2[0],zero2[1]),'ro',label='extrema using Seacent (%0.2f; %0.2f)'%(zero2[0],zero2[1]))\n ax.legend()\n plt.show()", "def display_results():\n pass", "def __call__():" ]
[ "0.63914204", "0.63914204", "0.5986804", "0.59826845", "0.59645784", "0.5792992", "0.5778979", "0.5771139", "0.5771139", "0.5771139", "0.57593817", "0.57593817", "0.57460934", "0.5673789", "0.5656487", "0.5655953", "0.56223005", "0.5616206", "0.560993", "0.56080663", "0.560432", "0.5603471", "0.5591425", "0.55910933", "0.55910933", "0.55910933", "0.55910933", "0.55624384", "0.55190325", "0.55054754", "0.5468469", "0.5447353", "0.54365796", "0.5429421", "0.5418774", "0.5406786", "0.53993124", "0.53917307", "0.53776854", "0.5367113", "0.5364031", "0.53604114", "0.53365034", "0.53297204", "0.5324961", "0.5319152", "0.53172207", "0.53134525", "0.5309881", "0.5304874", "0.5299356", "0.5291189", "0.52819794", "0.52743906", "0.52649033", "0.52600974", "0.52501875", "0.5245689", "0.5245638", "0.52240884", "0.52229345", "0.52125", "0.52086633", "0.5206865", "0.51891965", "0.518201", "0.518201", "0.51811594", "0.5180423", "0.51760614", "0.51677775", "0.51677775", "0.5161713", "0.5156817", "0.5156572", "0.5153086", "0.51529914", "0.51438123", "0.5142331", "0.51417357", "0.5141168", "0.51378894", "0.5137704", "0.5137194", "0.51368064", "0.5132928", "0.51312023", "0.512844", "0.5127816", "0.51239777", "0.51233834", "0.5123249", "0.5123249", "0.5123249", "0.5123249", "0.5123249", "0.51231515", "0.5120981", "0.5120627", "0.5117872", "0.5117194" ]
0.0
-1
do api request, parse error, return response.
def get_subtitleinfo(fileFullName):
    sys.stdout.write("Requesting subtitle info...\n")
    # Fetch the subtitle info from the API
    response = requests.post(
        "https://www.shooter.cn/api/subapi.php",
        verify=False,
        params={
            'filehash': ComputeFileHash(fileFullName),
            'pathinfo': os.path.realpath(fileFullName),
            'format': 'json',
            'lang': "Chn",
        },
    )
    # Handle the case where no subtitle was found
    if response.text == u'\xff':
        sys.stderr.write("Subtitle not found.\n")
        sys.exit(1)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _api_request(self,\n method: str,\n path_url: str,\n params: Dict[str, Any] = {}) -> Dict[str, Any]:\n base_url = f\"https://{global_config_map['gateway_api_host'].value}:\" \\\n f\"{global_config_map['gateway_api_port'].value}\"\n url = f\"{base_url}/{path_url}\"\n client = await self._http_client()\n if method == \"get\":\n if len(params) > 0:\n response = await client.get(url, params=params)\n else:\n response = await client.get(url)\n elif method == \"post\":\n response = await client.post(url, data=params)\n\n parsed_response = json.loads(await response.text())\n if response.status != 200:\n err_msg = \"\"\n if \"error\" in parsed_response:\n err_msg = f\" Message: {parsed_response['error']}\"\n raise IOError(f\"Error fetching data from {url}. HTTP status is {response.status}.{err_msg}\")\n if \"error\" in parsed_response:\n raise Exception(f\"Error: {parsed_response['error']}\")\n\n return parsed_response", "def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response", "def _api_request(self, endpoint, params=None):\n \n if params:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header},\n params=params)\n else:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header})\n code = response.status_code\n if 200 <= code < 300:\n logging.debug(f\"API call: {self.api_url}/{endpoint} | {code}\")\n encoding = response.encoding\n raw = response.content\n return json.loads(raw.decode(encoding))\n elif code > 500:\n raise APIAuthException\n else:\n logging.error(f\"ERROR: Bad API call: {self.api_url}/{endpoint} | {code}\")", "def get_response(request):\n uri = \"api.php\"\n response_body = None\n url = proto + \"://\" + host + \"/\" + api_dir + \"/\" + uri\n logger.debug(\"Enter get_response(url=\" + url + \") with request:\")\n logger.debug(request)\n if proto == \"http\":\n conn = httplib.HTTPConnection(host)\n elif proto == \"https\":\n conn = httplib.HTTPSConnection(host)\n else:\n logger.error(\"Unsupported protocol \" + proto)\n return None\n http_response = \"\"\n try:\n conn.connect()\n logger.debug(\"Sending to \" + host + \":\")\n logger.debug(request)\n data_json = json.dumps(request)\n data_json_enc = data_json\n data_json_enc_urlenc = urllib.urlencode({'data': data_json_enc})\n conn.putrequest('POST', \"/\" + api_dir + \"/\" + uri)\n headers = {'Content-Length': \"%d\" % (len(data_json_enc_urlenc)),\n 'Content-Type': 'application/x-www-form-urlencoded'}\n for k in headers:\n conn.putheader(k, headers[k])\n conn.endheaders()\n conn.send(data_json_enc_urlenc)\n http_response = conn.getresponse()\n if http_response.status == 200:\n response_body = http_response.read()\n logger.debug(\"Response from server: %s\" % response_body)\n if not response_body:\n return None\n url = \"%(proto)s://%(host)s/%(api_dir)s/%(uri)s\" % {\n \"proto\": proto,\n \"host\": host,\n \"api_dir\": api_dir,\n \"uri\": uri\n }\n logger.debug(\"Response from %(url)s : %(resp)s\" % {\n \"url\": url,\n \"resp\": response_body\n })\n else:\n logger.error(\"HTTP error %d %s\" % (http_response.status, http_response.reason))\n except socket.error as err:\n logger.error(\"Exception while making request \" + url)\n logger.error(err)\n logger.error(\"Please check that DNS server is reachable and works\")\n return None\n except exceptions.KeyError as err:\n logger.error(\"Failed to decode response from server %s\" % http_response)\n logger.error(\"Could not find key %s\" % err)\n return None\n 
finally:\n conn.close()\n return response_body", "def __exec_request(self, URL) -> Any:\n headers = {\n \"X-ELS-APIKey\": self.config['apikey'],\n \"Accept\": 'application/json'\n }\n\n request = requests.get(\n URL,\n headers=headers\n )\n self._status_code = request.status_code\n\n if request.status_code == 200:\n return json.loads(request.text, strict=False)\n else:\n return \"failed\"", "def _http_get(self, url, params={}):\n if not self.token:\n self.get_token()\n headers = {'Authorization': self.token, 'Accept': 'application/json; indent=4'}\n url = self.server + '/api2' + url\n try:\n r = requests.get(url=url, headers=headers, params=params)\n except requests.exceptions.RequestException as e:\n return check_failed(e)\n # raise ClientHttpError(None, e)\n if r.status_code != 200:\n return check_failed(r.status_code)\n # return ClientHttpError(r.status_code, json.loads(r.text)['error_msg'])\n try:\n data = json.loads(r.text)\n except:\n data = r.text\n # TODO: check data\n return data", "def _get_api_request(url):\n req = requests.get(url)\n\n if not req.status_code == 200:\n print(\"Error getting API request:\", url)\n print(\"Status code:\", req.status_code)\n print(\"Error:\", req.text)\n exit(200)\n\n data = None\n try:\n data = req.json()\n except JSONDecodeError:\n print(\"WarcraftLogs did not return proper JSON, it is likely down for maintenance.\")\n print(\"Request response:\", req.text)\n exit(300)\n\n return data", "def call_api(self):\n #generate the final call string\n self.generate_call_string();\n #debug\n #print (self.call_url);\n \n #finally make api call\n try: \n #pass; \n self.return_articles= json.loads(urlopen(self.call_url).read());\n #print json.dumps(self.return_articles, indent=4, sort_keys=True)\n except :#elaborate on this later\n print(\"Exception,response did not go through:\");\n e = sys.exc_info()[0]\n print(e);\n raise;\n return;", "def call_api_endpoint(url, data):\n try:\n \n req = urllib2.Request(url, data, {'Content-Type': 'application/json'})\n \n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n return response\n\n \n except urllib2.HTTPError, e:\n if e.code == 400:\n return -1", "def _request(self, endpoint: str = \"/api/\", params: object = {}) -> dict:\n ret: dict = {}\n try:\n if not self.api_key:\n ret[\"error\"] = \"API key is empty\"\n raise APIError(ret['error'])\n\n r = requests.get(f\"{self.apibase}{endpoint}\",\n params=params,\n headers=self.headers,\n verify=self.verify_ssl)\n response_data = orjson.loads(r.text)\n except orjson.JSONDecodeError:\n ret[\"error\"] = \"Failed to parse response data to JSON\"\n if self.debug:\n ret[\"error\"] += \"\\nDescription: \" + r.reason\n ret[\"error\"] += \"\\nData: \" + r.text\n except requests.HTTPError:\n ret[\"error\"] = f\"{r.status_code}: {r.reason}\"\n if self.debug:\n ret[\"error\"] += \"\\nDescription: \" + r.reason\n ret[\"error\"] += \"\\nData: \" + r.text\n\n if ret.get('error', None):\n raise APIError(ret['error'])\n check_status_code(request=r, debug=self.debug, ret=ret)\n\n ret = response_data\n return ret", "def _api_request(*args, **kwargs):\n response = requests.request(*args, **kwargs)\n return APIResponse(response)", "def _do_request(self, url, method='GET', body=None):\n response, content = self.request(url, method=method, body=body, headers=self.headers)\n if int(response['status']) != 200:\n raise GPAPIError(response['status'], 'ERROR IN REQUEST')\n json = simplejson.loads(content)\n return json", "def _api_request(self, path, method, data=None, query=None):\n\n 
url = request_url(\n self.config['secure'],\n self.config['hostname'],\n self.config['port'],\n path,\n query,\n )\n\n try:\n resp = request(\n url,\n method,\n self._headers(),\n data,\n self.config['timeout'],\n )\n\n return Response(\n resp.get('meta', {}),\n # Response info may have 'object' or 'objects' key, depending\n # on whether there are 1 or multiple results.\n resp.get('object', resp.get('objects'))\n )\n except HTTPError as e:\n response = e.read()\n fallback = '{0} {1}'.format(e.code, e.msg)\n\n if isinstance(response, bytes):\n data = response.decode('utf8')\n else:\n data = response\n\n error = json.loads(data).get('error', {})\n message = error.get('message', fallback)\n raise HTTPResponseError(message, status_code=e.code, cause=e)", "def http_request(endpoint, data, method='POST'):\n url = BASE_API + endpoint\n data['authkey'] = AUTH_KEY\n\n response = requests.request(method, url=url, data=data, timeout=300, verify=VERIFY)\n if response.status_code == 200:\n try:\n return response.json()\n except Exception as e:\n return_error('Response JSON decoding failed due to {}'.format(str(e)))\n\n else:\n return_error('API Returned, {}:{}'.format(response.status_code, response.reason))", "def _request_get(self, url):\n try:\n r = requests.get(url)\n except Exception:\n raise Exception('Cannot connect')\n if (r.status_code != 200):\n raise Exception('%d %s' % (r.status_code, r.text))\n if (not r.text) or (not r.text.strip()):\n raise Exception('Empty answer')\n try:\n response = json.loads(r.text)\n except Exception:\n raise Exception('Cannot parse response')\n return response", "def _handle_api_call(self, url):\n response = urlopen(url)\n url_response = response.read()\n json_response = loads(url_response)\n \n if not json_response:\n raise ValueError('Error getting data from the api, no return was given.')\n elif \"Error Message\" in json_response:\n raise ValueError(json_response[\"Error Message\"])\n elif \"Information\" in json_response and self.treat_info_as_error:\n raise ValueError(json_response[\"Information\"])\n \n return json_response", "def __make_api_get(self, api_path):\n try:\n self.last_response = urllib2.urlopen(\n self.api_server + api_path, cafile=self.cacert_path)\n json_data = self.last_response.read()\n\n # Check for errors\n except urllib2.HTTPError as err:\n error = \"API HTTP error [%s] - '%s'\" % (err.code, err.read())\n raise EFIgyCliError(error, self.last_response)\n\n except urllib2.URLError as err:\n error = 'Problem calling API at location %s - %s' % (\n self.api_server + api_path, err)\n raise EFIgyCliError(error, self.last_response)\n\n # Decode json response into an object\n try:\n ret = json.loads(json_data)\n except ValueError as err:\n error = \"Problem deserialising data, expecting JSON.\\nError: %s\\nData: %s\" % (\n err, json_data)\n raise EFIgyCliError(error, self.last_response)\n\n # Return JSON deserialised object\n return ret", "def __request(self,endpoint):\n apiRequest = requests.get(\"%s/%s\" % (self.baseurl,endpoint), \n auth=requests.auth.HTTPBasicAuth(self.api_id, self.api_secret))\n try:\n json = apiRequest.json()\n return json\n except JSONDecodeError:\n print(\"Failed to download or failed to parse JSON.\")\n print(apiRequest)\n return None", "async def request(\r\n self, method: str, url: str, params: dict = None, data: dict = None\r\n ):\r\n async with self._session.request(\r\n method,\r\n url,\r\n params=params,\r\n json=data,\r\n headers={\"Authorization\": \"Bearer \" + self._token},\r\n ) as resp:\r\n if resp.status == 
200:\r\n return await resp.json()\r\n if resp.status in (400, 422, 429, 500):\r\n data = None\r\n try:\r\n data = await resp.json()\r\n except Exception: # pylint: disable=broad-except\r\n pass\r\n raise APIResponseError(\r\n resp.request_info,\r\n resp.history,\r\n status=resp.status,\r\n message=resp.reason,\r\n headers=resp.headers,\r\n data=data,\r\n )\r\n resp.raise_for_status()", "def _send_in_request(self):\n try:\n req_params = urllib.urlencode(self._params)\n except Exception as ex:\n raise ProxyError('Error signing request string') \n \n try:\n self.logger.debug('Send api request to: %s' % self._api_url)\n self.logger.debug('Request params: %s' % req_params)\n self.logger.debug('Request timeout: %s' % self._timeout)\n if len(self._params) > 0:\n f = urllib2.urlopen(self._api_url, req_params, self._timeout)\n response = f.read()\n self.logger.debug('Response length: %s' % len(response))\n f.close() \n return response\n else:\n return \"{'command':'ping', 'message':'ok'}\" \n except (urllib2.URLError) as ex:\n self._error = json.loads(ex.fp.readline()).values()\n raise ProxyResponseError()\n except (IOError) as ex:\n raise ProxyError(ex)", "def api_req(dev, api_call):\r\n import xmltodict\r\n import logging\r\n try:\r\n r = requests.get(dev + ':8060' + api_call, timeout=5)\r\n except Exception as exc:\r\n response = [\"ERR\", exc]\r\n return response[0]\r\n except ConnectionError as connerr:\r\n response = [\"ERR\", connerr]\r\n return response[0]\r\n except TimeoutError as toerr:\r\n response = [\"ERR\", toerr]\r\n return response[0], toerr\r\n r_code = r.status_code\r\n if r_code == 200:\r\n print(\"REQUEST WAS A SUCCESS. DEVICE RETURNED: {} \".format(str(r)))\r\n r2 = r.text\r\n response = xmltodict.parse(r2, xml_attribs=False)\r\n return response\r\n else:\r\n response = \"UnknownERR\"\r\n dev.state(DISABLED)\r\n return msg_box(response)", "def _api_request(date: str, api_url: str) -> Dict[str, str]:\n try:\n data = json.loads(urlopen(\n Request(f\"{api_url}?at={quote(date)}\")\n ).read().decode('utf-8'))\n except HTTPError as e:\n data = json.loads(e.file.read().decode('utf-8'))\n if \"message\" in data:\n raise ValidationError(data[\"message\"])\n else:\n raise ValidationError(f\"Service unavailable ({e}\")\n return data", "def parse_api(baseurl):\n\t# Make 4 api calls in total\n\tfor call in range(1, 5):\n\t\t# Write an error to file if no response after 4 attempts\n\t\tif call == 4:\n\t\t\twith open('errors.txt', 'a') as file:\n\t\t\t\tfile.write(\"Server is not responding after 4 attempts. Time: {}\\n\".format(\n\t\t\t\t\tdt_to_string(datetime.datetime.now())\n\t\t\t\t))\n\t\t\treturn False\n\t\ttry:\n\t\t\t# download data from api\n\t\t\tresult = urllib.request.urlopen(baseurl).read()\n\t\t\treturn result\n\t\texcept urllib.error.URLError:\n\t\t\t# wait 10 minutes if server is not responding and make another call\n\t\t\tprint(\"Seems like server is not responding. 
Will try again in 10 minutes...\")\n\t\t\ttime.sleep(660)", "def _handle_api_result(self, response, params, tries, wait, ae_retry):\n try:\n res = response.json()\n except ValueError:\n e = \"API query failed: JSON could not be decoded.\"\n raise exceptions.APIError(e)\n\n if \"warnings\" in res:\n for name, value in res[\"warnings\"].items():\n try:\n warning = value[\"warnings\"]\n except KeyError:\n try:\n warning = value[\"*\"]\n except KeyError:\n warning = value\n self._logger.warning(\"API warning: %s: %s\", name, warning)\n\n if self._should_save_cookiejar():\n self._save_cookiejar()\n\n try:\n code = res[\"error\"][\"code\"]\n info = res[\"error\"][\"info\"]\n except (TypeError, KeyError): # If there's no error code/info, return\n if \"query\" in res and \"tokens\" in res[\"query\"]:\n for name, token in res[\"query\"][\"tokens\"].items():\n self._tokens[name.split(\"token\")[0]] = token\n return res\n\n if code == \"maxlag\": # We've been throttled by the server\n if tries >= self._max_retries:\n e = \"Maximum number of retries reached ({0}).\"\n raise exceptions.APIError(e.format(self._max_retries))\n tries += 1\n msg = 'Server says \"{0}\"; retrying in {1} seconds ({2}/{3})'\n self._logger.info(msg.format(info, wait, tries, self._max_retries))\n sleep(wait)\n return self._api_query(params, tries, wait * 2, ae_retry=ae_retry)\n elif code in [\"assertuserfailed\", \"assertbotfailed\"]: # AssertEdit\n if ae_retry and all(self._login_info) and not self._oauth:\n # Try to log in if we got logged out:\n self._login(self._login_info)\n if \"token\" in params: # Fetch a new one; this is invalid now\n params[\"token\"] = self.get_token(params[\"action\"])\n return self._api_query(params, tries, wait, ae_retry=False)\n if not all(self._login_info) and not self._oauth:\n e = \"Assertion failed, and no login info was provided.\"\n elif code == \"assertbotfailed\":\n e = \"Bot assertion failed: we don't have a bot flag!\"\n else:\n e = \"User assertion failed due to an unknown issue. 
Cookie or OAuth problem?\"\n raise exceptions.PermissionsError(\"AssertEdit: \" + e)\n else: # Some unknown error occurred\n e = 'API query failed: got error \"{0}\"; server says: \"{1}\".'\n error = exceptions.APIError(e.format(code, info))\n error.code, error.info = code, info\n raise error", "def __apiRequest(self, url, parms={}):\n authparms = self.__addAuthParms(parms);\n request = self.http.request('GET', url, fields=authparms)\n if request.status != 200:\n raise ApiCommunicationError('Failed to retrieve data from Marvel, HTTP Status {}'.format(request.status))\n else:\n return json.loads( request.data.decode('utf-8') )", "def api_call():\n\n json_str = load_input()\n output = {\n 'inputs': json_str,\n 'results': 'cool results'}\n\n return json.dumps(output), 200, {'Content-Type': 'text/plain;charset=utf-8'}", "def call(self):\n # if this is a POST request, process data\n if self.data:\n post_json = json.dumps(self.data)\n values = {'json': post_json, 'apikey': API_KEY}\n post = urllib.parse.urlencode(values)\n\n else:\n post = None\n\n req = urllib.request.Request(self.url, post)\n\n try:\n self.response = urllib.request.urlopen(req, timeout=self.timeout)\n\n except (URLError, HTTPError, timeout) as error:\n self.response = error", "def api_call(uri, name, method='get', **kwargs):\n # Ensure uri is valid\n if not bool(urlparse.urlparse(uri).netloc):\n print(\"Error: {0} is not a valid url\").format(uri)\n return False\n func = getattr(requests, method)\n args = {}\n for key, value in kwargs.iteritems():\n args[key] = value\n try:\n response = func(uri, **args)\n except requests.exceptions.Timeout:\n print \"The api call to {0} timed out\".format(uri)\n except requests.exceptions.TooManyRedirects:\n print \"The api call to {0} appears incorrect, returned: too many re-directs\".format(uri)\n except requests.exceptions.RequestException as error:\n print \"The api call to {0} failed\\n Error {1}\".format(uri, error)\n sys.exit(1)\n try:\n response_dictionary = response.json()\n except ValueError:\n return response\n #If API call errors out print the error and quit the script\n if response.status_code not in [200, 201]:\n if 'errors' in response_dictionary:\n errors = response_dictionary.pop('errors')\n first_error = errors.pop()\n elif 'error' in response_dictionary:\n first_error = response_dictionary.pop('error')\n else:\n first_error['message'] = \"No error message provided by response\"\n msg = \"{0} returned an error, exiting the script.\\n\".format(name)\n msg += \"Status Code: {0} \\n\".format(response.status_code)\n msg += \"Error: {0}\".format(first_error['message'])\n print msg\n return False\n else:\n return response_dictionary", "def perform_request(self,\n request: RequestBase,\n method: str='POST'\n ):\n headers = {\n 'Accept': 'application/json',\n 'User-Agent': self.user_agent()\n }\n if APIAuthentication.use_http_auth:\n headers['Authorization'] = 'Basic {auth}'.format(auth=self.get_auth())\n\n # Lazy loader for api credentials.\n if request.requires_api_token() and ParamValidator.is_empty(request.api_token)\\\n and ParamValidator.not_empty(APIAuthentication.api_token):\n request.api_token = APIAuthentication.api_token\n if request.requires_service_id() and ParamValidator.is_empty(request.service_id)\\\n and ParamValidator.not_empty(APIAuthentication.service_id):\n request.service_id = APIAuthentication.service_id\n\n # Build url\n url = \"{0}/{1}\".format(PAYNL_END_POINT, request.get_url())\n parameters = request.get_parameters()\n if APIAuthentication.use_http_auth 
and 'token' in parameters:\n del parameters['token']\n\n if self.print_debug:\n print(\"Calling {} using {}\".format(url, method))\n print(\"HTTP Headers: {}\".format(json.dumps(headers)))\n print(\"Params: {}\".format(json.dumps(parameters)))\n\n if method.upper() == 'GET':\n response = requests.get(url, verify=True, headers=headers, params=parameters)\n else:\n response = requests.post(url, verify=True, headers=headers, data=parameters)\n\n if response.status_code not in self.__supported_status_codes:\n response.raise_for_status()\n\n if self.print_debug:\n print(\"Response object: {}\".format(response))\n print(\"Raw response: {}\".format(response.text))\n\n # Now the we have a response, let the request class handle the response.\n request.raw_response = response.text\n\n if self.print_debug:\n print(type(request.response))\n\n if request.response.is_error():\n raise ErrorException(request.response.request)", "def _make_request(self, url: str, parameters: dict = None,\n method: str = 'GET', *args, **kwargs):\n response = requests.request(\n method=method,\n url=build_url(\n self.BASE_API_URL, url, parameters\n ),\n headers={\n 'Authorization': 'Bearer {}'.format(self._access_token)\n }, **kwargs\n )\n if response.ok:\n return response.json()\n raise MondoApiException(response.json()['message'])", "def _call_api(endpoint, query, data):\n headers = {\"Content-Type\": \"application/json\", }\n response = InnerTube._execute(f\"{endpoint}?{parse.urlencode(query)}\", \"POST\", headers=headers, data=data)\n\n try:\n resp = json.loads(response.read())\n except JSONDecodeError as e:\n log(f\"{__class__.__name__}: Parsing response error: {e}\")\n else:\n return resp", "def _doRequest(self, httpClientMethod, *args):\n try:\n resp = httpClientMethod(*args)\n return resp.json()\n except RequestException as e:\n raise checkedError(e)", "def call_api(url):\n\n req = requests.get(url)\n return req", "def call_api(url):\n\n req = requests.get(url)\n return req", "def _process_url(self, url):\n response = requests.get(url, timeout=self.TIMEOUT)\n try:\n ret = response.json()\n except JSONDecodeError:\n self.log.exception(\"JSONDecodeError, response: %r, response.text: %r\", response, response.text)\n ret = {\"error\": \"The api broke.\"}\n return ret", "def api_call(url, method, debug, **kwargs):\n resp = None\n attempt = 0\n maxattempts = 3\n req = Request(method.upper(), url, **kwargs)\n\n if debug:\n print(\"DEBUG: Request ({}) {}\".format(method.upper(), url))\n\n while True:\n try:\n attempt += 1\n resp = Session().send(\n Session().prepare_request(req), verify=True)\n resp.raise_for_status()\n break\n except (HTTPError, ConnectionError, Timeout) as ex:\n if attempt >= maxattempts:\n abort(ex.message)\n else:\n time.sleep(1)\n continue\n except RequestException as ex:\n abort(ex.message)\n\n if resp is not None:\n return resp\n else:\n abort(\"Error making API call to URL: \" % url)", "def call(self, http_method, api_path, params=None, raw_data=False):\n\n # get function of requests package\n requests_func = getattr(requests, http_method.lower())\n\n # parse parameters\n req_params = {}\n file_params = {}\n\n if params is not None:\n for key, value in six.iteritems(params):\n if isinstance(value, (datetime.date,\n datetime.datetime,\n float,\n int)):\n req_params[key] = six.text_type(value)\n elif isinstance(value, six.string_types):\n req_params[key] = six.text_type(value)\n elif hasattr(value, \"read\"):\n filename = os.path.split(value.name)[1]\n if not _is_string_ascii_encodeable(filename):\n 
b64_key = key + \"name_b64enc\"\n byte_value = filename.encode(\"utf-8\")\n b64_value = base64.b64encode(byte_value).decode(\"utf-8\")\n\n filename = \"@param=%s\" % b64_key\n req_params[b64_key] = b64_value\n file_params[key] = (filename, value, \"application/octet-stream\")\n else:\n raise VMRayRESTAPIError(\"Parameter \\\"{}\\\" has unknown type \\\"{}\\\"\".format(key, type(value)))\n\n # construct request\n if file_params:\n files = file_params\n else:\n files = None\n\n # we need to adjust some stuff for POST requests\n if http_method.lower() == \"post\":\n req_data = req_params\n req_params = None\n else:\n req_data = None\n\n # do request\n result = requests_func(self.server + api_path,\n data=req_data,\n params=req_params,\n headers={\"Authorization\": \"api_key {}\".format(self.api_key)},\n files=files,\n verify=self.verify_cert,\n stream=raw_data)\n handle_rest_api_result(result)\n\n if raw_data:\n return result.raw\n\n # parse result\n try:\n json_result = result.json()\n except ValueError:\n raise ValueError(\"API returned invalid JSON: {}\".format(result.text))\n\n # if there are no cached elements then return the data\n if \"continuation_id\" not in json_result:\n return json_result.get(\"data\", None)\n\n data = json_result[\"data\"]\n\n # get cached results\n while \"continuation_id\" in json_result:\n # send request to server\n result = requests.get(\"{}/rest/continuation/{}\".format(self.server, json_result[\"continuation_id\"]),\n headers={\"Authorization\": \"api_key {}\".format(self.api_key)},\n verify=self.verify_cert)\n handle_rest_api_result(result)\n\n # parse result\n try:\n json_result = result.json()\n except ValueError:\n raise ValueError(\"API returned invalid JSON: {}\".format(result.text))\n\n data.extend(json_result[\"data\"])\n\n return data", "def call_api(self, url, method='GET', headers=None, params=None, data=None):\n r = requests.request(method=method, url=url, headers=headers, params=params, data=data)\n \n self.log.debug(f'Called endpoint {url} with result {r}')\n\n try:\n jayson = json.loads(r.text)\n return jayson\n except:\n self.log.info(f'ERROR! Text of response object: {r.text}')", "async def _make_request(self, url: str, params, server_id: str):\n headers = {\n 'X-Response-Control': 'minified',\n 'User-Agent': 'Friendly Red bot'\n }\n\n if server_id in self.config:\n if 'API_TOKEN' in self.config[server_id]:\n headers['X-Auth-Token'] = self.config['API_TOKEN']\n else:\n await self.bot.say(box('Requests made without an authentication token are limited to 100 requests per 24 hours.\\nYou can request a key by registering at http://api.football-data.org and setting it via [p]football tokenset.'))\n\n async with aiohttp.get(url, headers=headers, params=params) as r:\n if r.status == 200:\n data = await r.json()\n return data\n elif r.status == 400:\n await self.bot.say(box('Bad Request [400]:\\nYour request was malformed most likely the value of a Filter was not set according to the Data Type that is expected.'))\n return\n elif r.status == 403:\n await self.bot.say(box('Restricted Resource [403]:\\nYou tried to access a resource that exists, but is not available for you. 
This can be out of the following reasons:\\n- the resource is only available to authenticated clients\\n- the resource is only available to donating clients\\n- the resource is not available in the API version you are using'))\n return\n elif r.status == 404:\n await self.bot.say(box('Not found [404]\\nYou tried to access a resource that doesn’t exist.'))\n return\n elif r.status == 429:\n await self.bot.say(box('Too many requests [429]\\nYou exceeded your allowed requests per minute/day depending on API version and your user status.\\nSee http://api.football-data.org/docs/v1/index.html#_request_throttling for more information.'))\n await self.bot.say(box('Requests reset in ' + r.headers['X-RequestCounter-Reset'] + ' seconds.'))\n return\n else:\n await self.bot.say(box('Pancake has no idea what you\\'ve done, seriously.'))\n await self.bot.say(box(r.status + '\\n' + r.json()['error']))\n return", "def _call(self, method, url, params):\n if not url.startswith('http'):\n url = self.root + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n r = self._session.request(method, url,\n headers=headers,\n proxies=self.proxies,\n params=params,\n timeout=self.requests_timeout)\n r.raise_for_status() # Check for error\n return r.json()", "def _get_request(url_root,api_key,path,response_type,params, ssl_verify):\n url = _url_builder(url_root,api_key,path,params)\n content = _fetch(url, ssl_verify)\n response = _dispatch(response_type)(content)\n return response", "def _do_request(self, url: str):\n\n self.debug.ok('method', self.method)\n\n if self.client.fake_response_path:\n with open(self.client.fake_response_path, 'r') as f:\n return constants.ResponseCode.OK, f.read()\n\n elif self.method == constants.RequestConst.GET:\n response = requests.get(\n url, headers=self._headers(), timeout=self._timeout\n )\n\n self.debug.ok(\n constants.RequestConst.QUERY_PARAMETERS,\n self.parameters[constants.RequestConst.QUERY]\n )\n self.debug.ok(constants.ResponseConst.RESPONSE_OBJECT, response)\n\n return response.status_code, response.text\n\n elif self.method in [\n constants.RequestConst.POST,\n constants.RequestConst.PUT,\n constants.RequestConst.DELETE\n ]:\n if self.method == constants.RequestConst.POST:\n send_request = requests.post\n elif self.method == constants.RequestConst.PUT:\n send_request = requests.put\n elif self.method == constants.RequestConst.DELETE:\n send_request = requests.delete\n\n response = send_request(\n url, json=self.parameters[constants.RequestConst.QUERY],\n headers=self._headers(), timeout=self._timeout\n )\n\n self.debug.ok('payload', self.parameters[\n constants.RequestConst.QUERY\n ])\n self.debug.ok(constants.ResponseConst.RESPONSE_OBJECT, response)\n\n return response.status_code, response.text\n\n else:\n return constants.ResponseCode.NOT_FOUND, {}", "def pull(self, url, params=None, data=None, auth=None, method=\"GET\"):\n try:\n __method_name = inspect.currentframe().f_code.co_name\n if method == \"POST\":\n res = self.session.post(\n url=url,\n auth=auth,\n params=params,\n data=data,\n timeout=consts.API_TIMEOUT,\n )\n else:\n res = self.session.get(\n url=url,\n auth=auth,\n params=params,\n data=data,\n timeout=consts.API_TIMEOUT,\n )\n res.raise_for_status()\n if res and res.status_code in [200, 201]:\n self.applogger.debug(\n '{}(method={}) : {} : API call: Response received successfully. 
url=\"{}\" params=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n params,\n )\n )\n return res.json()\n else:\n self.applogger.error(\n \"{}(method={}) : {} : API call: Unknown status code or empty \"\n 'response: url=\"{}\" status_code=\"{}\" response=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n res.status_code,\n res.text,\n )\n )\n raise Exception(\"Received unknown status code or empty response.\")\n except requests.exceptions.HTTPError as ex:\n if res.status_code == 404:\n self.applogger.debug(\n '{}(method={}) : {} : API call: Got {} Status Code : url=\"{}\"'\n ' response=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n ex.response.status_code,\n ex.response.text,\n )\n )\n return {}\n else:\n self.applogger.error(\n '{}(method={}) : {} : API call: Unsuccessful response: url=\"{}\" status_code=\"{}\"'\n ' response=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n ex.response.status_code,\n ex.response.text,\n )\n )\n raise Exception(\"HTTP Error Occurred while getting response from api.\")\n except Exception as ex:\n self.applogger.error(\n '{}(method={}) : {} : API call: Unexpected error while API call url=\"{}\" error=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n str(ex),\n )\n )\n raise Exception(\"Error Occurred while getting response from api.\")", "async def handle_request(self, api_endpoint, api_version):\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n # will call process_get or process_post methods for the given API\n res = await getattr(api_endpoint, 'process_' + request_method)(api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (\n validerr.absolute_path.pop(), validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise dberr\n except Exception as err: # pylint: disable=broad-except\n err_id = err.__hash__()\n res = 'Internal server error <%s>:' \\\n 'please include this error id in bug report.' 
% err_id\n code = 500\n LOGGER.exception(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n\n # raise tornado.web.HTTPError(status_code=444, reason='error happened')\n self.set_status(code)\n self.write(res)", "def _execApiCall(headers, params, method_name,\r\n domain='ma.gnolia.com',\r\n urlhead='/api/rest/1/'):\r\n \r\n if 'api_key' not in params and method_name not in ['echo', 'get_key']:\r\n raise MagnoliaException('Required API Key parameter missing')\r\n conn = httplib.HTTPConnection(domain)\r\n conn.request('POST', urlhead + method_name, params, headers)\r\n return conn.getresponse()", "def _parse_response(self, response, all_ops):\n try:\n parsed_response = json.loads(response)\n except Exception, e:\n raise ApiError(e)\n if 'error' in parsed_response: # needed anymore?\n raise ApiError(parsed_response['error'])\n # Return the true API return value.\n return parsed_response", "def api():\n try:\n data = json.loads(request.data)\n except:\n return jsonify({\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32700, \"message\": \"Parse error\"}, \"id\": None}), 500\n return jsonify(hwi.jsonrpc(data))", "def _request(self, method, url, body=None, headers=None, serialize=True):\n headers = headers or {}\n headers['Accept'] = 'application/json'\n headers['User-Agent'] = 'paxes-httpclient'\n if body and not 'Content-Type' in headers:\n headers['Content-Type'] = 'application/json'\n if self.auth_token:\n headers['X-Auth-Token'] = self.auth_token\n LOG.debug('>> %s %s, %s, %s' % (method, url, headers, body))\n conn = self._create_connection(url)\n if body and serialize:\n body = json.dumps(body)\n conn.request(method, url, body, headers)\n res = conn.getresponse()\n header_list = res.getheaders()\n header_dict = {}\n for ituple in header_list:\n header_dict[ituple[0].lower()] = ituple[1]\n response_info = {\n 'status': res.status,\n 'reason': res.reason,\n 'headers': header_dict,\n 'body': res.read()\n }\n LOG.debug('<< %d %s, %s, %s' % (response_info['status'],\n response_info['reason'],\n response_info['headers'],\n response_info['body']))\n conn.close()\n return response_info", "def _http_post(self, url, params={}):\n url = self.server + '/api2' + url\n try:\n r = requests.post(url=url, data=params)\n except requests.exceptions.RequestException as e:\n return check_failed(e)\n # raise ClientHttpError(None, e)\n if r.status_code != 200:\n return check_failed(r.status_code)\n # return ClientHttpError(r.status_code, r.text)\n data = json.loads(r.text)\n # TODO: check data\n return data", "def make_request(request_url, request_url_without_api_key):\n response = requests.get(request_url)\n if response.status_code != requests.codes.ok:\n err = \"Check the URL and API details, cannot connect to: `{}`\".format(\n request_url_without_api_key\n )\n logger.error(err)\n raise TaskError(\"Bad response from server: {}\".format(err))\n try:\n packages = response.json()\n except json.JSONDecodeError:\n err = \"Response is OK, but cannot decode JSON from server\"\n logger.error(err)\n raise TaskError(err)\n return packages", "def _request(http, project, method, data, base_url, client_info):\n user_agent = client_info.to_user_agent()\n headers = {\n \"Content-Type\": \"application/x-protobuf\",\n \"User-Agent\": user_agent,\n connection_module.CLIENT_INFO_HEADER: user_agent,\n }\n api_url = build_api_url(project, method, base_url)\n\n response = 
http.request(url=api_url, method=\"POST\", headers=headers, data=data)\n\n if response.status_code != 200:\n error_status = status_pb2.Status.FromString(response.content)\n raise exceptions.from_http_status(\n response.status_code, error_status.message, errors=[error_status]\n )\n\n return response.content", "async def _perform_api_request(self, url, **kwargs):\n error = ''\n json = {}\n max_retries = 5\n for retries in range(max_retries):\n async with self._session.get(url, **kwargs) as resp:\n self.request_count += 1\n status = resp.status\n if resp.status == 504:\n error = 'API timeout'\n self.retry_count += 1\n continue\n try:\n resp.raise_for_status()\n except ClientResponseError:\n error = f'{resp.status}: {resp.reason}'\n continue\n try:\n json = await resp.json()\n except ContentTypeError:\n error = 'Unable to decode JSON'\n self.retry_count += 1\n status = 0\n continue\n json['request_datetime'] = datetime.now()\n break\n\n if retries == max_retries - 1 and error:\n logger.warning(error)\n\n return json, status", "def __make_api_post(self, api_path, data=None):\n headers = {\n \"Content-type\": \"application/json\",\n \"Accept\": \"application/json\"}\n x = json.dumps(data)\n\n try:\n req = urllib2.Request(self.api_server + api_path, x, headers)\n self.last_response = urllib2.urlopen(req, cafile=self.cacert_path)\n json_data = self.last_response.read()\n\n # Check for errors\n except urllib2.HTTPError as err:\n error = \"API HTTP error [%s] - '%s'\" % (err.code, err)\n raise EFIgyCliError(error, err)\n\n except urllib2.URLError as err:\n error = 'Problem calling API at location %s - %s' % (\n self.api_server + api_path, err)\n raise EFIgyCliError(error, self.last_response)\n\n # Decode json response into an object\n try:\n ret = json.loads(json_data)\n except ValueError as err:\n error = \"Problem deserialising data, expecting JSON.\\nError: %s\\nData: %s\" % (\n err, json_data)\n raise EFIgyCliError(error, self.last_response)\n\n # Return JSON deserialised object\n # print \"DEBUG - %s\"%(ret), type(ret)\n return ret", "async def request_api(url):\n\theaders = {\"User-Agent\": f\"Mozilla/5.0 aiotfm/{__version__}\"}\n\n\ttry:\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(url, headers=headers) as resp:\n\t\t\t\treturn await resp.json()\n\texcept aiohttp.ClientError:\n\t\treturn {}", "def _make_request(self):\n response = urllib2.urlopen(\n url=self.api_url,\n data=self._get_request_data()\n )\n content = response.read()\n return json.loads(content.decode('utf8'))", "def _response(request):\n with urllib.request.urlopen(request) as response:\n status = response.getcode()\n # print(status, response.info(), )\n data = json.loads(\n response.read().decode('utf-8')\n )\n # print(data)\n if status == 200 and data[\"ok\"]:\n return data, status\n elif status == 200 and not data[\"ok\"]:\n raise ValueError('client._response() - Server response is not good ' +\n json.dumps(data))\n else:\n raise ConnectionFault('client._response() - Connection Error: ' +\n str(response.getcode()))", "async def send_request(self, url: str, params: dict) -> dict:\n if self.session is None:\n # Create a session if one doesn't exist\n await self.create_session()\n\n async with self.session.get(url, params=params, headers=self.headers) as resp:\n # Make sure that the response of the request\n # returns code 200. 
Something wrong happened if it doesn't\n if not (300 > resp.status >= 200):\n # Raise an error if the status code isn't 200\n raise ParsingError(f\"Library error parsing request from API: {str(resp.status)}\")\n\n try:\n # We attempt to return the contents\n # of the request in JSON format\n response = await resp.json()\n if resp.status >= 400:\n # This is a validation error from the API\n # Likely has to do with missing/improper params\n missing_params = list()\n for param in response[\"detail\"]:\n missing_params.append(f\"{param['msg']} - {param['loc'][0]}\")\n raise InvalidParams(f\"Impropert params in given request: {missing_params}\")\n # If that fails, simply return the contents of the request\n # without ant kind of formatting (aka just read it)\n except aiohttp.ClientResponseError:\n raise ParsingError(\"Could not return contents from the request\")\n\n # Return the respose from the request, if any\n return response", "async def _api_request(self, path: str, method=\"GET\") -> Any:\n params = {\"access_token\": self.token}\n\n attempt = 0\n while attempt <= MAX_REQUEST_ATTEMPT_COUNT:\n attempt += 1\n conn = aiohttp.TCPConnector(ssl=False)\n try:\n async with aiohttp.request(\n method, f\"{self.api_url}/{path}\", params=params, connector=conn\n ) as resp:\n if resp.status >= 400:\n # retry on server errors or request timeout w/ increasing delay\n if resp.status >= 500 or resp.status == 408:\n if attempt < MAX_REQUEST_ATTEMPT_COUNT:\n _LOGGER.debug(\n \"%s request to %s failed with code %d: %s.\"\n \" Retrying...\",\n method,\n path,\n resp.status,\n resp.reason,\n )\n await asyncio.sleep(\n attempt * REQUEST_RETRY_DELAY_INTERVAL\n )\n continue\n\n if resp.status == 401:\n raise InvalidToken()\n else:\n raise RequestError(resp)\n try:\n json = await resp.json()\n if \"error\" in json and json[\"error\"]:\n raise RequestError(resp)\n return json\n except ContentTypeError as e:\n text = await resp.text()\n _LOGGER.warn(\"Unable to parse as JSON: %s\", text)\n raise e\n except (\n ClientConnectionError,\n asyncio.TimeoutError,\n ContentTypeError,\n ) as e:\n # catch connection exceptions to retry w/ increasing delay\n if attempt < MAX_REQUEST_ATTEMPT_COUNT:\n _LOGGER.debug(\n \"%s request to %s failed with %s. 
Retrying...\",\n method,\n path,\n str(e),\n )\n await asyncio.sleep(attempt * REQUEST_RETRY_DELAY_INTERVAL)\n continue\n else:\n raise e\n finally:\n await conn.close()", "def http_request(\n self,\n method: str,\n url_suffix: str,\n json_data=None,\n params=None,\n headers=None,\n ):\n resp = Response()\n try:\n resp = super()._http_request(\n method=method,\n url_suffix=url_suffix,\n json_data=json_data,\n params=params,\n headers=headers,\n resp_type='response',\n timeout=self.request_timeout,\n ok_codes=(200, 201),\n error_handler=self.handle_error_response,\n )\n except MissingSchema:\n raise ValueError(MESSAGES['MISSING_SCHEMA_ERROR'])\n except InvalidSchema:\n raise ValueError(MESSAGES['INVALID_SCHEMA_ERROR'])\n except InvalidURL:\n raise ValueError(MESSAGES['INVALID_API_URL'])\n except DemistoException as e:\n self.handle_demisto_exception(e)\n\n if resp.ok:\n content_type = resp.headers.get('Content-Type', '')\n if content_type == CONTENT_TYPE_JSON:\n # Handle empty response\n if resp.text == '':\n return resp\n else:\n return resp.json()\n elif self.is_supported_context_type(content_type):\n return resp", "def request_routine(self, url, request_method, json_data=None):\n response_obj = requests.request(request_method,\n url=url,\n headers=self.header,\n data=json.dumps(json_data),\n verify=self.verify)\n\n LOG.debug('JovianDSS: Response code: %s', response_obj.status_code)\n LOG.debug('JovianDSS: Response data: %s', response_obj.text)\n\n ret = dict()\n ret['code'] = response_obj.status_code\n\n if '{' in response_obj.text and '}' in response_obj.text:\n if \"error\" in response_obj.text:\n ret[\"error\"] = json.loads(response_obj.text)[\"error\"]\n else:\n ret[\"error\"] = None\n if \"data\" in response_obj.text:\n ret[\"data\"] = json.loads(response_obj.text)[\"data\"]\n else:\n ret[\"data\"] = None\n\n return ret", "def make_request(self, url):\n try:\n response = requests.get(url)\n if response.status_code != 200:\n return None\n return response.json()\n except requests.ConnectionError:\n return None", "def call_api(payload):\n data = requests.get(weatherbit_url, params=payload)\n assert data.status_code == 200, f\"Something wrong. 
Error details: {data.json()['error']}\"\n assert 'error' not in data.json().keys(), f'Problem: {data.json()}'\n return data.json()", "def _request(self, query):\n query_string = self._create_query_string(query)\n\n try:\n response = requests.get(query_string)\n except requests.exceptions.ConnectionError:\n raise EngineConnectionException(self.name, \"Unable to send request, check connectivity.\")\n\n if response.status_code != 200:\n raise EngineConnectionException(self.name, \"\", code=response.status_code)\n\n return self._parse_json_response(query, response)", "def http_request(self, method: str, url_suffix: str, params: dict = None, json_data: dict = None, **kwargs):\n response = self._http_request(method, url_suffix, params=params, json_data=json_data, resp_type=\"response\",\n ok_codes=[200, *list(HTTP_ERRORS.keys())], raise_on_status=False, **kwargs)\n if response.status_code == 400 and response.json() and response.json().get('Message'):\n raise DemistoException(\n HTTP_ERRORS[response.status_code].format(\"Message:\" + response.json().get(\"Message\")))\n elif response.status_code in list(HTTP_ERRORS.keys()):\n raise DemistoException(HTTP_ERRORS[response.status_code])\n return response.json()", "def _request(self, method, api_url, **kwargs):\n if not self.connected:\n raise Exception(\"'{d}' is not connected for alias '{a}'\"\n .format(d=self.device.name,\n a=self.alias))\n\n # Deal with the dn\n full_url = '{f}{api_url}'.format(f=self.url, api_url=api_url)\n\n if 'data' in kwargs:\n payload = kwargs['data']\n elif 'json' in kwargs:\n payload = kwargs['json']\n else:\n payload = ''\n\n log.info(\"Sending {method} command to '{d}':\"\n \"\\napi_url: {furl}\\nPayload:{payload}\"\n .format(method=method,\n d=self.device.name,\n furl=full_url,\n payload=payload))\n\n headers = {\n 'Dcnm-Token': self.token,\n 'Content-type': 'application/json'\n }\n\n # Send to the device\n response = self.session.request(method=method, url=full_url, headers=headers, **kwargs)\n\n\n # Make sure it was successful\n try:\n response.raise_for_status()\n except Exception:\n raise RequestException(\n \"'{c}' result code has been returned \"\n \"for '{d}'.\\nResponse from server: \"\n \"{r}\".format(d=self.device.name,\n c=response.status_code,\n r=response.text))\n\n # In case the response cannot be decoded into json\n # warn and return the raw text\n\n try:\n output = response.json()\n if isinstance(output, list):\n output = {\"temproot\": output}\n except Exception:\n log.warning('Could not decode json. Returning text!')\n output = response.text\n\n return output", "def _request(self, url):\n response = requests.get(url, headers=self.header)\n\n if str(response.status_code).startswith('2'):\n return response\n\n raise Exception(\"URI request returned an error. 
Error Code \" + str(response.status_code))", "def base_request(url_path):\n response = requests.get(settings.URL_API + url_path)\n if response.status_code != 200:\n return response\n else:\n return response.json()", "def api_call(endpoint, params, headers):\n\n api_response = get(BASE_URL.format(endpoint=endpoint), params=params,\n headers=headers)\n\n api_response.raise_for_status()\n json_resp = api_response.json()\n\n api_response.close()\n return json_resp", "def make_api_call(action, parameters = {}, method = 'get', data = {}):\n headers = {\n 'Content-type': 'application/json',\n 'Accept-Encoding': 'gzip',\n 'Authorization': 'Bearer %s' % ACCESS_TOKEN\n }\n if method == 'get':\n r = s.request(method, API_BASE_URL+action, headers=headers, params=parameters, timeout=30)\n elif method == 'post':\n r = s.request(method, API_BASE_URL+action, headers=headers, data=data, params=parameters, timeout=10)\n else:\n raise ValueError('Method should be get or post.')\n log('API %s call: %s' % (method, r.url) )\n if ((r.status_code == 200 and method == 'get') or (r.status_code == 201 and method == 'post')):\n return r.json()\n else:\n raise ValueError('API error when calling %s : %s' % (r.url, r.content))", "def request(self, verb, subpath, data=''):\n if not self.api_key or not self.api_secret:\n raise Exception(\"Vingd authentication credentials undefined.\")\n \n endpoint = urlparse(self.api_endpoint)\n if endpoint.scheme != 'https':\n raise Exception(\"Invalid Vingd endpoint URL (non-https).\")\n \n host = endpoint.netloc.split(':')[0]\n port = 443\n path = urljoin(endpoint.path+'/', subpath)\n \n creds = \"%s:%s\" % (self.api_key, self.api_secret)\n headers = {\n 'Authorization': b'Basic ' + base64.b64encode(creds.encode('ascii')),\n 'User-Agent': self.USER_AGENT\n }\n try:\n conn = httplib.HTTPSConnection(host, port)\n conn.request(verb.upper(), quote(path), data, headers)\n r = conn.getresponse()\n content = r.read().decode('ascii')\n code = r.status\n conn.close()\n except httplib.HTTPException as e:\n raise InternalError('HTTP request failed! (Network error? 
Installation error?)')\n \n try:\n content = json.loads(content)\n except:\n raise GeneralException(content, 'Non-JSON server response', code)\n \n if 200 <= code <= 299:\n try:\n return content['data']\n except:\n raise InvalidData('Invalid server DATA response format!')\n \n try:\n message = content['message']\n context = content['context']\n except:\n raise InvalidData('Invalid server ERROR response format!')\n \n if code == Codes.BAD_REQUEST:\n raise InvalidData(message, context)\n elif code == Codes.FORBIDDEN:\n raise Forbidden(message, context)\n elif code == Codes.NOT_FOUND:\n raise NotFound(message, context)\n elif code == Codes.INTERNAL_SERVER_ERROR:\n raise InternalError(message, context)\n elif code == Codes.CONFLICT:\n raise GeneralException(message, context)\n \n raise GeneralException(message, context, code)", "def _method_call(self, method, **kwargs):\n try:\n connection = httplib.HTTPConnection(self._api_address)\n except:\n raise FantasyDataError('Error: Cannot connect to the FantasyData API')\n\n try:\n method = method.format(format=self._response_format, **kwargs)\n request_url = \"/standard/{format}/{method}?{get_params}\".format(format=self._response_format, method=method,\n get_params=self._get_params)\n connection.request(\"GET\", request_url, \"\", self._headers)\n response = connection.getresponse()\n\n result = json.loads(response.read())\n\n if isinstance(result, dict) and \"statusCode\" in result:\n if (result['statusCode']) == 401:\n raise FantasyDataError('Error: Invalid API key')\n else:\n raise FantasyDataError('Error: Failed to get response')\n\n return result\n # except:\n # pass\n finally:\n connection.close()", "def http_call(\n self,\n method,\n url,\n data=None,\n json_data=None,\n headers=None,\n verify=False,\n params=None,\n ):\n if data:\n _response = getattr(self.session, method.lower())(\n url, data=data, headers=headers, params=params, verify=verify\n )\n\n elif json_data:\n _response = getattr(self.session, method.lower())(\n url, json=json_data, headers=headers, params=params, verify=verify\n )\n\n else:\n _response = getattr(self.session, method.lower())(\n url, headers=headers, params=params, verify=verify\n )\n self.api_calls += 1\n\n try:\n _response.raise_for_status()\n except HTTPError:\n raise HTTPError(\n f\"{_response.json()['status']}: {_response.json()['message']}\"\n )\n\n return _response", "def _api_call(self, api_call, method=\"GET\", payload=None):\n # type: (str, str, Dict[str, str]) -> requests.Response\n\n headers = {\n \"accept\" : \"application/json\",\n \"Authorization\" : f\"Bearer {self.access_token}\",\n \"x-ibm-client-id\" : self.client_id,\n }\n self.__log.debug(headers)\n api_url = f\"{self.base_url}/{api_call}\"\n\n self.__log.debug(f\"Calling {api_url} with method {method}\")\n if method == \"GET\":\n resp = requests.get(api_url, headers=headers)\n elif method == \"POST\":\n resp = requests.post(api_url, headers=header, data=payload)\n elif method == \"PUT\":\n resp = requests.put(api_url, headers=header, data=payload)\n elif method == \"DELETE\":\n resp = requests.delete(api_url, headers=headers)\n elif method == \"HEAD\":\n resp = requests.head(api_url, headers=headers)\n elif method == \"OPTIONS\":\n resp = requests.options(api_url, headers=headers)\n else:\n raise Exception(f\"The method {method} is unsupported\")\n \n if (resp.ok):\n return resp\n else:\n self.__log.debug(resp.status_code)\n self.__log.debug(resp.text)\n return resp", "def request(self, service, data):\n _res = self._request(service, data)\n 
res = _res.json()[0][0]\n if res[\"success\"] == True:\n return res[\"result\"]\n else:\n err_msg = res[\"errmsg\"]\n raise Exception(\"Request not successful: '{0}'\".format(err_msg))", "def send_request(self, request):\n json_results = requests.get(request).json()\n\n status = json_results['status']\n\n if status == const.STATUS_OK:\n return json_results['results']\n\n self.log.warning(self.get_status_code(status))", "def request(self, params):\n params[\"public_key\"] = self.__PUBLIC_KEY\n post_data = urllib.parse.urlencode(params)\n hmac_ = hmac.new(self.__SECRET_KEY, post_data.encode(\"utf-8\"), hashlib.sha256).hexdigest()\n\n curl = pycurl.Curl()\n curl.setopt(pycurl.URL, self.__URL)\n curl.setopt(pycurl.HTTPHEADER, ['HMAC: ' + str(hmac_)])\n curl.setopt(pycurl.POST, True)\n curl.setopt(pycurl.POSTFIELDS, post_data)\n curl.setopt(pycurl.CONNECTTIMEOUT, 10)\n curl.setopt(pycurl.TIMEOUT, 5)\n\n buf = io.BytesIO()\n curl.setopt(pycurl.WRITEFUNCTION, buf.write)\n curl.perform()\n\n response = buf.getvalue()\n\n # Uncomment to debug raw JSON response\n # self.__log(\"< \" + response)\n\n http_code = curl.getinfo(pycurl.HTTP_CODE)\n\n curl.close()\n\n result = json.loads(response.decode('utf-8'))\n\n if http_code != 200:\n if result[\"error\"]:\n # 404 with some valid JSON\n self.__log(\"ERROR: HTTP \" + str(http_code) + \": \" + json.dumps(result[\"error\"]))\n else:\n self.__log(\"ERROR: HTTP \" + str(http_code) + \": \" + response)\n else:\n if result is None:\n self.__log(\"ERROR: Unparsable JSON \" + response)\n else:\n if 'error' in result: # || !$result[\"result\"]\n self.__log(\"ERROR: \" + json_encode(result[\"error\"]))\n else:\n result = result[\"result\"]\n\n return result", "def _get(self, url: str) -> requests.Response:\n # todo: do some error checking here\n if url.startswith(API_PATH['base']):\n try:\n # logger.debug(f\"RestClient._get(): {url}\") # log in calling function\n response = requests.get(url, auth=self.auth)\n rest_code = response.json()['meta']['code']\n if rest_code not in [200, 201, 204]:\n raise RestException(f\"REST API Error: {rest_code}. 
{response.content}\")\n except RestException as e:\n logger.error(e)\n return None\n return response\n else:\n raise ValueError(f\"URL is invalid: {url}\")", "async def _request(\n self, req_method: str, endpoint: str, extra_query: QueryDict = None, json=True\n ) -> Union[Dict, bytes]:\n if req_method not in (\"GET\", \"POST\"):\n raise APIError(f\"{req_method} not a known request method!\")\n\n url = await self._create_url(endpoint, extra_query=extra_query)\n\n async with aiohttp.ClientSession() as session:\n\n session_methods = {\"GET\": session.get, \"POST\": session.post}\n\n async with session_methods[req_method](url) as resp:\n self.logger.debug(\"got response: %s\", resp)\n\n if resp.status == 200:\n if json:\n data = await resp.json()\n self.logger.debug(\"got json: %s\", data)\n if data[\"subsonic-response\"][\"status\"] == \"failed\":\n raise APIError(\n data[\"subsonic-response\"][\"error\"][\"message\"]\n )\n return data\n\n data = await resp.read()\n return data\n\n raise APIError(f\"got status code {resp.status}!\")", "async def _api_call(self, url, payload={}, retry=False):\n timeout = aiohttp.ClientTimeout(total=self.api_timeout)\n try:\n async with self._client_session.get(\n API_URL + url, headers=self.headers, timeout=timeout, data=payload\n ) as resp:\n if not retry and resp.status == 401:\n await self.renew_auth()\n return await self._api_call(url, payload, True)\n\n # 4xx represents unauthenticated\n if resp.status == 401 or resp.status == 403 or resp.status == 404:\n raise SenseAuthenticationException(f\"API Return Code: {resp.status}\")\n\n if resp.status != 200:\n raise SenseAPIException(f\"API Return Code: {resp.status}\")\n\n return await resp.json()\n except asyncio.TimeoutError as ex:\n # timed out\n raise SenseAPITimeoutException(\"API call timed out\") from ex", "def _api_call(self, **kwargs):\n params = {\n 'format': 'json',\n }\n params.update(kwargs)\n r = requests.get(self.api_base_url, params=params)\n return r.json()", "def execute(self) -> typing.Dict[str, typing.Any]:\n headers = {\n \"User-Agent\": \"{zenora.__name__} {zenora.__version__}\",\n \"Authorization\": f\"{self.token}\",\n }\n if self.headers:\n headers = self.headers\n\n if self.json:\n r = requests.request(\n method=self.method,\n url=self.url,\n headers=headers,\n json=self.json,\n )\n else:\n r = requests.request(\n method=self.method,\n url=self.url,\n headers=headers,\n data=self.form_data,\n )\n\n return raise_error_or_return(r) # type: ignore[return-value]", "def __call(method, resource, headers=None, json=None):\n result = requests.request(method, resource, headers=headers, json=json)\n\n if result:\n try:\n return result.json()\n except ValueError:\n pass\n\n _LOGGER.debug(\"Erroneous response (%s)\", result)\n return result", "async def request(\n self, method: str, path: Optional[str] = \"\", json: Optional[dict] = None\n ) -> dict:\n LOGGER.debug('Sending \"%s\" \"%s\" to \"%s %s\"', method, json, self.host, path)\n\n url = f\"http://{self.host}:{self.port}/api/{self.api_key}{path}\"\n\n try:\n async with self.session.request(method, url, json=json) as res:\n\n if res.content_type != \"application/json\":\n raise ResponseError(\n \"Invalid content type: {}\".format(res.content_type)\n )\n\n response = await res.json()\n LOGGER.debug(\"HTTP request response: %s\", pformat(response))\n\n _raise_on_error(response)\n\n return response\n\n except client_exceptions.ClientError as err:\n raise RequestError(\n \"Error requesting data from {}: {}\".format(self.host, err)\n ) from None", 
"def _make_request(self):\n try:\n self.response = requests.request(\n method=self.method,\n url=self.url,\n params=self.params,\n data=self.data,\n )\n\n logger.debug(f\"Request URL: {self.response.url}\")\n\n self.response.raise_for_status()\n\n # wrap all `requests` library error and serve as custom application error\n except RequestException as e:\n logger.error(e.__str__(), exc_info=True)\n raise ExternalAPIError(\n \"Error while communication with External API\"\n )", "def _do_api_call(\n self,\n endpoint_info: tuple[str, str],\n json: dict[str, Any] | None = None,\n wrap_http_errors: bool = True,\n ):\n method, endpoint = endpoint_info\n\n # TODO: get rid of explicit 'api/' in the endpoint specification\n url = f\"https://{self.host}/{endpoint}\"\n\n aad_headers = self._get_aad_headers()\n headers = {**self.user_agent_header, **aad_headers}\n\n auth: AuthBase\n token = self._get_token()\n if token:\n auth = _TokenAuth(token)\n else:\n self.log.info(\"Using basic auth.\")\n auth = HTTPBasicAuth(self.databricks_conn.login, self.databricks_conn.password)\n\n request_func: Any\n if method == \"GET\":\n request_func = requests.get\n elif method == \"POST\":\n request_func = requests.post\n elif method == \"PATCH\":\n request_func = requests.patch\n elif method == \"DELETE\":\n request_func = requests.delete\n else:\n raise AirflowException(\"Unexpected HTTP Method: \" + method)\n\n try:\n for attempt in self._get_retry_object():\n with attempt:\n response = request_func(\n url,\n json=json if method in (\"POST\", \"PATCH\") else None,\n params=json if method == \"GET\" else None,\n auth=auth,\n headers=headers,\n timeout=self.timeout_seconds,\n )\n response.raise_for_status()\n return response.json()\n except RetryError:\n raise AirflowException(f\"API requests to Databricks failed {self.retry_limit} times. 
Giving up.\")\n except requests_exceptions.HTTPError as e:\n if wrap_http_errors:\n raise AirflowException(\n f\"Response: {e.response.content}, Status Code: {e.response.status_code}\"\n )\n else:\n raise e", "def _do_api_call(self, endpoint_info, json=None):\n method, endpoint = endpoint_info\n if self.fivetran_conn is None:\n self.fivetran_conn = self.get_connection(self.conn_id)\n auth = (self.fivetran_conn.login, self.fivetran_conn.password)\n url = f\"{self.api_protocol}://{self.api_host}/{endpoint}\"\n\n headers = {\n \"User-Agent\": self.api_user_agent\n }\n\n if method == \"GET\":\n request_func = requests.get\n elif method == \"POST\":\n request_func = requests.post\n elif method == \"PATCH\":\n request_func = requests.patch\n headers.update({\"Content-Type\": \"application/json;version=2\"})\n else:\n raise AirflowException(\"Unexpected HTTP Method: \" + method)\n\n attempt_num = 1\n while True:\n try:\n response = request_func(\n url,\n data=json if method in (\"POST\", \"PATCH\") else None,\n params=json if method in (\"GET\") else None,\n auth=auth,\n headers=headers\n )\n response.raise_for_status()\n return response.json()\n except requests_exceptions.RequestException as e:\n if not _retryable_error(e):\n # In this case, the user probably made a mistake.\n # Don't retry.\n raise AirflowException(\n f\"Response: {e.response.content}, \"\n f\"Status Code: {e.response.status_code}\"\n )\n\n self._log_request_error(attempt_num, e)\n\n if attempt_num == self.retry_limit:\n raise AirflowException(\n f\"API request to Fivetran failed {self.retry_limit} times.\"\n \" Giving up.\"\n )\n\n attempt_num += 1\n sleep(self.retry_delay)", "def _do_operation(self,\n\t operation, api_path, query_params=None, munchify=True, debug_response=False,\n\t expected_status_codes=range(200, 300), *unused_args, **kwargs):\n\n\t\tif not self._session:\n\t\t\tmsg = u'No session has been created for the API. Have you called create() yet?'\n\t\t\tlog_with_debug_info(logging.ERROR, msg)\n\t\t\traise OperationError(msg)\n\n\t\tresponse = None\n\t\tretdata = None\n\n\t\tendpoint = self._build_endpoint(api_path, params=kwargs, query_params=query_params)\n\n\t\tparams = {u'headers': self._headers, u'verify': self._verify_cert}\n\n\t\tif u'data' in kwargs:\n\t\t\tparams[u'data'] = json.dumps(kwargs[u'data'])\n\n\t\tlog_with_debug_info(logging.DEBUG, u'Call parameters: {0}'.format(params))\n\n\t\t# Call the API endpoint\n\t\tresponse = getattr(self._session, operation)(endpoint, **params)\n\n\t\tlog_with_debug_info(logging.DEBUG, u'Response status: {0} {1}'.format(response.status_code,\n\t\t response.reason))\n\n\t\tif response.status_code not in expected_status_codes:\n\t\t\ttry:\n\t\t\t\tretdata = response.json()\n\t\t\texcept Exception as e:\n\t\t\t\t# Invalid JSON payload.\n\t\t\t\tmsg = (u'HTTP Status Code: [{0}]; API response data for end-point [{1}] does not '\n\t\t\t\t u'appear to be valid JSON. 
Cause: {2}.')\n\t\t\t\tmsg = msg.format(response.status_code, endpoint, e)\n\t\t\t\tif debug_response:\n\t\t\t\t\tlog_with_debug_info(logging.ERROR, msg + u' Data: [' + str(response.text) + u']')\n\t\t\t\traise InvalidJSONError(msg, resp=response)\n\t\t\tmsg = u'{0} request to RESTful API at [{1}] expected status(s) {2}; failed: {3} {4};'\\\n\t\t\t u' Response: {5}'\n\t\t\tmsg = msg.format(operation.upper(), endpoint, expected_status_codes,\n\t\t\t response.status_code, response.reason, retdata)\n\t\t\tlog_with_debug_info(logging.ERROR, msg)\n\t\t\traise OperationError(msg, resp=response)\n\n\t\ttry:\n\t\t\tif response.status_code in ('204',):\n\t\t\t\t# \"204 No Content\"\n\t\t\t\tretdata = {}\n\t\t\telse:\n\t\t\t\t# Decode the expected JSON\n\t\t\t\tretdata = response.json()\n\t\texcept Exception as e:\n\t\t\t# Invalid JSON payload.\n\t\t\tmsg = (u'HTTP Status Code: [{0}]; API response data for end-point [{1}] does not '\n\t\t\t u'appear to be valid JSON. Cause: {2}.')\n\t\t\tmsg = msg.format(response.status_code, endpoint, e)\n\t\t\tif debug_response:\n\t\t\t\tlog_with_debug_info(logging.ERROR, msg + u' Data: [' + str(response.text) + u']')\n\t\t\traise InvalidJSONError(msg, resp=response)\n\t\tretdata = munch.munchify(retdata) if munchify else retdata\n\t\treturn (retdata[u'response'] if u'response' in retdata else retdata), response", "def _api_request(method, path, data = None):\n headers = {}\n\n if method == 'PUT' or method == 'POST':\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data)\n\n connection = httplib.HTTPSConnection(API_HOST,\n API_PORT,\n key_file = VM_CERTIFICATE,\n cert_file = VM_CERTIFICATE)\n\n connection.request(method, path, data, headers)\n response = connection.getresponse()\n\n if response.status == 404:\n return None\n if response.status == 403:\n return False\n\n if method == 'PUT' or method == 'POST':\n return True\n\n try:\n return json.loads(response.read())\n except:\n print('Response code: %s' % response.status)\n raise", "def _call(self, method: str, *url_parts, data=None,\n params=None, stream=None) -> bytes:\n response = self.request(method.upper(),\n self._mk_url(url_parts),\n params=params,\n data=data,\n stream=stream)\n\n if response.status_code not in (200, 201):\n try:\n self._raise_error(response.status_code, response.json())\n except ValueError as e:\n raise APIError(response.status_code,\n self._RESPONSE_NOT_JSON_MSG) from e\n\n return response.content", "def make_request(self, url, action, data='', status_code='', parser=None):\n self._url = self.get_api_path(url)\n headers = {\n 'Content-Type': \"application/json\",\n 'Token': self.token,\n\n }\n kwargs = {}\n if headers:\n kwargs.update(headers=headers)\n if data:\n kwargs.update(data=json.dumps(data))\n\n return getattr(self.http, action.lower())(self._url, **kwargs)", "def http_request(url, post_params=None, timeout=None, max_response=None, json=False):\n try:\n if post_params:\n response = requests.post(url, data=post_params, timeout=timeout)\n else:\n response = requests.get(url, timeout=timeout)\n except requests.exceptions.RequestException as error:\n raise HttpError(exception=error)\n if response.status_code != 200:\n raise HttpError(\n error_type='status_code',\n message='Error making request: response code {} ({}).'\n .format(response.status_code, response.reason)\n )\n if json:\n try:\n return response.json()\n except json.JSONDecodeError as error:\n raise HttpError(\n exception=error,\n message='Response not valid JSON: {!r}'.format(response.text)\n )\n 
else:\n return response.text", "def makeApiCall(url, endpointParams, type):\r\n\r\n if type == 'POST': # post request\r\n data = requests.post(url, endpointParams)\r\n else: # get request\r\n data = requests.get(url, endpointParams)\r\n\r\n response = dict() # hold response info\r\n response['url'] = url # url we are hitting\r\n response['endpoint_params'] = endpointParams # parameters for the endpoint\r\n response['endpoint_params_pretty'] = json.dumps(endpointParams, indent=4) # pretty print for cli\r\n response['json_data'] = json.loads(data.content) # response data from the api\r\n response['json_data_pretty'] = json.dumps(response['json_data'], indent=4) # pretty print for cli\r\n\r\n return response # get and return content\r", "def _do_call(cls, method, url, params={}):\n headers = {\n 'User-Agent': 'py-retain/' + __version__,\n 'content-type': 'application/json'\n }\n try:\n r = cls.request_map[method.lower()]\n except KeyError:\n raise ValueError(\"Unknow HTTP Method\")\n response = r(\n url,\n auth=(cls.app_id, cls.api_key),\n headers=headers,\n data=json.dumps(params),\n timeout=cls.timeout)\n return response.json()", "def request(self, method, path, msg=None, json=None, retfmt='bool', errorfatal=True, autoauth=True, verify_ssl=False):\n url = self.admin_url + '/acs/api/v1' + path\n\n if msg:\n self.log.info(msg)\n\n headers = self.default_headers.copy()\n\n if not self.auth_header and autoauth:\n self.set_auth_header()\n\n if self.auth_header:\n headers.update(self.auth_header)\n\n if method == 'get':\n r = requests.get(url, headers=headers, json=json, verify=verify_ssl)\n elif method == 'post':\n r = requests.post(url, headers=headers, json=json, verify=verify_ssl)\n elif method == 'put':\n r = requests.put(url, headers=headers, json=json, verify=verify_ssl)\n elif method == 'delete':\n r = requests.delete(url, headers=headers, json=json, verify=verify_ssl)\n\n if 200 <= r.status_code < 300:\n self.log.debug(\"success\")\n if retfmt == 'json':\n self.log.debug('returning json')\n return r.json()\n elif retfmt == 'request':\n self.log.debug('returning request object')\n return r\n else:\n return True\n else:\n if 'Content-Type' in r.headers and r.headers['Content-Type'] == 'application/json':\n resp = r.json()['code']\n else:\n resp = r.reason\n msg = \"failed: {}\".format(resp)\n self.log.debug(msg)\n if errorfatal:\n raise Exception(msg)\n else:\n if retfmt == 'request':\n self.log.debug('returning request object')\n return r\n else:\n return None", "def make_request(self, request, captcha_response=None):\n logger.debug('Prepare API Method request %r', request)\n response = self._send_api_request(request=request,\n captcha_response=captcha_response)\n response.raise_for_status()\n response_or_error = json.loads(response.text)\n logger.debug('response: %s', response_or_error)\n\n if 'error' in response_or_error:\n error_data = response_or_error['error']\n vk_error = VkAPIError(error_data)\n\n if vk_error.is_captcha_needed():\n captcha_key = self.get_captcha_key(vk_error.captcha_img_url)\n if not captcha_key:\n raise vk_error\n\n # Retry http request with captcha info attached\n captcha_response = {\n 'sid': vk_error.captcha_sid,\n 'key': captcha_key,\n }\n return self.make_request(\n request, captcha_response=captcha_response)\n\n elif vk_error.is_access_token_incorrect():\n logger.info(\n 'Authorization failed. 
Access token will be dropped')\n self._access_token = None\n return self.make_request(request)\n\n else:\n raise vk_error\n elif 'execute_errors' in response_or_error:\n # can take place while running .execute vk method\n # See more: https://vk.com/dev/execute\n raise VkAPIError(response_or_error['execute_errors'][0])\n elif 'response' in response_or_error:\n return response_or_error['response']", "def _request(self, url, values=None):\n\n url += \"?{}\".format(urllib.urlencode(values))\n\n request = urllib2.Request(url)\n\n try:\n connection = urllib2.urlopen(request)\n except urllib2.HTTPError, e:\n return {'status': 'Failed', 'message': str(e.reason)}\n except urllib2.URLError, e:\n return {'status': 'Failed', 'message': str(e.reason)}\n except httplib.HTTPException, e:\n return {'status': 'Failed', 'message': str(e.reason)}\n except Exception as exception:\n return {'status': 'Failed', 'message': str(exception)}\n\n response = connection.read()\n connection.close()\n\n\n try:\n result = json.loads(response.decode())\n except ValueError as exception:\n return {'status': 'Failed', 'message': str(exception)}\n\n return result", "async def _httpx_request(method: Literal[\"GET\", \"POST\", \"DELETE\"], url: str, payload: dict, response_model: pydantic.BaseModel, private: bool=False):\n # return\n\n if private:\n try:\n keyset = get_keys() # returns a set of tuples (key, secret) \n except EmptyEnv:\n # do not proceed to send a test request in this case\n return\n key, secret = keyset.pop()\n payload[\"timestamp\"] = auth_timestamp()\n payload[\"signature\"] = auth_signature(url, payload, secret=secret)\n headers = auth_headers(key)\n else:\n headers = {}\n\n async with httpx.AsyncClient() as client:\n if private:\n signature = payload.pop(\"signature\")\n payload = sorted([(k, v) for k, v in payload.items()], reverse=False)\n payload.append((\"signature\", signature)) #! 
needs to always be last param\n if method in [\"POST\", \"DELETE\"]:\n r = await client.request(method, url, data=dict(payload), headers=headers)\n else:\n r = await client.request(method, url, params=payload, headers=headers)\n else:\n r = await client.request(method, url, params=payload)\n \n rjson = r.json()\n print(\"response.json\", rjson)\n\n assert r.status_code == 200, f\"Json Response {rjson} \\nPayload {payload} \\nHeaders {headers} \\nRequest {r.request}\"\n\n # if empty response, return directly without validation\n # (this might be what we expected, so assertion has to take place in the test function not here)\n if not rjson:\n return rjson\n\n # else we validate\n if isinstance(rjson, list) or isinstance(rjson, tuple):\n response_model(rjson)\n else:\n response_model(**rjson)", "async def _call_api(\n self, endpoint: str, method: str = \"GET\", data: Optional[dict] = None\n ) -> Optional[ClientResponse]:\n try:\n response = await self._session.request(\n method=method,\n url=f\"{BASE_URL}/{endpoint}\",\n headers={\n \"aftership-api-key\": self._api_key,\n \"Content-Type\": \"application/json\",\n },\n json=data,\n timeout=ClientTimeout(total=self._timeout),\n )\n return await self._handle_response(response)\n\n except asyncio.TimeoutError as exception:\n raise AfterShipCommunicationException(\"Timeout error\") from exception\n\n except (ClientError, gaierror) as exception:\n raise AfterShipCommunicationException(\n f\"Communication error {exception}\"\n ) from exception", "def _call_api(self, api_function, kwargs):\n kwargs['_request_timeout'] = self._timeout\n retries = self._retries\n while True:\n try:\n response = api_function(**kwargs)\n # Call was successful (200)\n return self._create_valid_response(response, api_function, kwargs)\n except ApiException as error:\n # If no chance for retries, return the error\n if retries == 0:\n return self._create_error_response(error)\n # If bad request or not found, return the error (it will never work)\n elif error.status in [400, 404]:\n return self._create_error_response(error)\n # If authentication error, reset access token and retry\n elif error.status in [401, 403]:\n self._set_auth_header(refresh=True)\n # If rate limit error, wait the proper time and try again\n elif error.status == 429:\n # If the the minute limit hit, wait that long\n if (int(error.headers.get(Headers.x_ratelimit_remaining_min))\n == int(error.headers.get(Headers.x_ratelimit_min))):\n time.sleep(60)\n # Otherwise it was the second limit and only wait a second\n time.sleep(1)\n # If some internal server error we know nothing about, return\n elif error.status == 500:\n return self._create_error_response(error)\n # If internal server errors that has to do with timeouts, try again\n elif error.status > 500:\n pass\n # If error with the swagger client, raise the error\n else:\n raise PureError(error)\n retries = retries - 1", "def _call(self, request_method, endpoint, params=None, data=None):\n response = request_method(\n self._construct_url(endpoint),\n params=params,\n data=data,\n auth=BsdApiAuth(self.api_id, self.api_secret)\n )\n\n if response.status_code == 202 and endpoint != \"get_deferred_results\":\n return self._resolve_deferred_response(response, self.deferred_result_max_attempts)\n else:\n return response", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: 
{0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)" ]
[ "0.6998465", "0.697629", "0.6958306", "0.68044615", "0.67445", "0.673964", "0.67353", "0.67159045", "0.6714112", "0.6700891", "0.6693908", "0.66411114", "0.6610895", "0.6592531", "0.65848005", "0.6565488", "0.65573376", "0.6525482", "0.64771914", "0.6451753", "0.6419186", "0.6418593", "0.63855463", "0.6362626", "0.6345507", "0.63184345", "0.63043714", "0.62958246", "0.62918055", "0.62906134", "0.62864435", "0.62850064", "0.6279468", "0.6279468", "0.6277813", "0.6261688", "0.6256282", "0.6243914", "0.62056494", "0.6205348", "0.6196219", "0.6189344", "0.6188848", "0.6182817", "0.61755407", "0.6174989", "0.6172594", "0.6166166", "0.6156797", "0.6156074", "0.6155242", "0.6146402", "0.6141418", "0.61367166", "0.6135424", "0.6134019", "0.6130558", "0.61191744", "0.611643", "0.6110306", "0.6102733", "0.609839", "0.60884124", "0.6085871", "0.60742205", "0.6063546", "0.6060491", "0.6056238", "0.6054171", "0.60472053", "0.60405785", "0.6038251", "0.6032271", "0.60318375", "0.6017209", "0.6014929", "0.6012561", "0.60053366", "0.6005092", "0.60013676", "0.5996974", "0.5986768", "0.59858346", "0.59836286", "0.59808016", "0.59690297", "0.5953665", "0.59217393", "0.59149665", "0.59087443", "0.5904626", "0.58992016", "0.5897545", "0.5886306", "0.58862835", "0.58861613", "0.5879825", "0.58760065", "0.587209", "0.5871229", "0.587087" ]
0.0
-1
Returns Home Assistant base url without '/api' or trailing slash
def get_url(self, url): if not url: raise ValueError('Property "url" is missing in config') return url.replace("/api", "").rstrip("/")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_url(url_base):\n return f\"{url_base}/api/v2\"", "def get_api_url() -> str:\n\n site = pywikibot.Site()\n url = site.protocol() + \"://\" + site.hostname() + site.apipath()\n return url", "def api_url(self) -> httpx.URL:\n return self._client.base_url", "def get_api_url() -> str:\n\n\tsite = pywikibot.Site()\n\turl = site.protocol() + \"://\" + site.hostname() + site.apipath()\n\treturn url", "def get_api_url():\n return \"https://api.basespace.illumina.com/v1pre3\"", "def get_home_url():\n url = base_url + _HOME\n return url", "def base_url(self):\n return \"http://{0}:{1}/app\".format(self.host, self.port)", "def base_url(self):\n return 'http://%s/api.php?token=%s&path_info=' % \\\n (self.ac_url, self.api_key)", "def api_endpoint(self, url):\n if urlparse(url).scheme in [\"http\", \"https\"]:\n return url # url is already complete\n return urljoin(f\"{RESOURCE}/{API_VERSION}/\", url.lstrip(\"/\"))", "def _get_api_url(self):\n return \"%s/%s/\" % (settings.API_URL, settings.API_VERSION)", "def _base_url(self):\n # URL Protocol\n proto = 'https' if self._ssl else 'http'\n\n # Device port number\n if self._port is None:\n port = 8080 if self._ssl else 8008\n else:\n port = self._port\n \n return f'{proto}://{self._address}:{port}/api/v1'", "def api_base_url(self):\n\n\t\treturn self._api_base_url", "def get_base_url(self):\n try:\n return self.get_metadata()['api_endpoint']\n except requests.exceptions.RequestException:\n raise", "def api_url(self):\n return self.get_api_url()", "def base_url():\n return json.loads('{\"message\": \"Try with /data\", \"success\": false}')", "def api_url(self, endpoint):\n\n return '{}/{}'.format(self.api_root, endpoint)", "def clean_url(app_server, base_path) -> str:\n if app_server.endswith('/'):\n base_url = f\"{app_server[:-1]}{base_path}\"\n else:\n base_url = f\"{app_server}/{base_path}\"\n return base_url", "def get_api_url(self, query_, api):\n api_url = \"%s%s%s\" % (api, query_, self.api_key)\n\n return api_url", "def getRootURL():", "def get_short_url_base():", "def _construct_url(self, endpoint):\n return self.base_url + self.api_path + endpoint.strip('/')", "def getHome(self):\n # host = getHostWithPort(url)\n # ui = UrlInfo(url)\n # host = url[ui.getHostHead():ui.getHostTail()]\n host = self.url[self.host_head:self.host_tail]\n return \"http://\" + host + \"/\"", "def test_home_url_with_no_appid(self):\n gae_req = AppEngineRequest(url=\"/\")\n\n url = gae_req.build_url()\n\n self.assertEqual(url, \"http://localhost/\")", "def home():\n return (\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n \n )", "def _get_api_url_for (self, component):\n if self.api_data['API_ROOT'].find(self.api_data['API_BASE_URL']) > -1:\n return self.api_data['API_ROOT'] + '/' + self.api_data['BUILD_IDENTIFIER'] + self.urls[component]\n else:\n return self.api_data['API_ROOT'] + self.api_data['API_BASE_URL'] + '/' + self.api_data['BUILD_IDENTIFIER'] + self.urls[component]", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def _get_base_url(self):\n return '/{}/'.format(self.name.replace('__', '/'))", "def _get_url(self, *args):\n if self._baseUrl not in args:\n args.insert(0, self._baseUrl)\n args = filter(lambda item: item is not None, args)\n return \"/\".join(args)", "def api_base_url(self):\n\n return self._api_base_url", "def 
BASE_URL():\n BASE_URL = \"http://api.zippopotam.us/\"\n return BASE_URL", "def url_base():\n return \"https://dev-yourOrg.us.auth0.com\"", "def app_url(self):\n return self.request.host_url", "def base_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"base_url\")", "def base_url_path(self):\n path = urlsplit(self.base_url())[2]\n if path.endswith(\"/\"):\n path = path[:-1]\n return path", "def url (self):\n return Links.createURL('/')", "def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()", "def api_url(self):\n return self._api_url", "async def _api_url(self) -> URL:\n return await self._gitlab_api_url(\"\")", "def get_base_url(self):\n return self.base_url", "def api_endpoint(self) -> str:\n return pulumi.get(self, \"api_endpoint\")", "def api_path(self, path=\"\"):\n return \"https://{domain}/{path}\".format(\n domain=self.setting(\"DOMAIN\"), path=path\n )", "def Url(self) -> str:", "def fix_apiroot(root):\n if '://' in root:\n return root\n if ('/' not in root) or ('.' not in root.split('/')[0]):\n root = \"www.pennapps.com/\" + root\n return \"http://%s\" % root", "def public_rest_url(path_url: str, domain: str = CONSTANTS.DEFAULT_DOMAIN) -> str:\n return CONSTANTS.FTX_BASE_URL + path_url", "def base_url(self) -> URL:\n return (\n URL(self.url)\n if self.url is not None\n else URL.build(\n scheme=f\"http{'s' if self.ssl else ''}\",\n host=self.hostname or self.ipaddress,\n port=str(self.port) if self.port else None,\n path=self.base_api_path or \"\",\n )\n )", "def get_url():\n config = configparser.RawConfigParser()\n config.read(\"speech.cfg\")\n region = config.get('auth', 'region')\n host = REGION_MAP[region]\n return (\n f\"wss://{host}/speech-to-text/api/v1/recognize\"\n \"?model=en-US_BroadbandModel&x-watson-learning-opt-out=true\"\n )", "def url(self):\n url = os.environ.get('PATH_INFO')\\\n or os.environ.get('REQUEST_URI')\n return url if url else ''", "def apiurl(self):\n return self._apiurl", "def base_url(self):\n return self._get_base_url()", "def getAPI(self):\n return self.api_url", "async def _landing_url(self, responses: SourceResponses) -> URL:\n return URL(f\"{await self._api_url()}/CxWebClient\")", "def full_url(resource):\r\n # if (url/resource == '127.0.0.1':)\r\n if resource == '/' or resource == ' ':\r\n url = \"{0}{1}\".format(ROOT_DIRECTORY, URL_TEST)\r\n # else (if url/resource == 'Specific resource')\r\n else:\r\n url = \"{0}{1}\".format(ROOT_DIRECTORY, str(resource).replace('/', '\\\\'))\r\n print(f'the client request = {url}')\r\n return url", "def _get_api_endpoint():\n try:\n return get_service_endpoint(\"apiext\").strip(\"/\")\n except:\n log.warn(\n \"Could not find valid apiext endpoint for links so will use policy engine endpoint instead\"\n )\n try:\n return get_service_endpoint(\"policy_engine\").strip(\"/\")\n except:\n log.warn(\n \"No policy engine endpoint found either, using default but invalid url\"\n )\n return \"http://<valid endpoint not found>\"", "def getBaseURL():\n return getQualifiedURL(getScriptname())", "def url_root():\n return \"OK\"", "def get_endpoint_url(endpoint):\n return urljoin(api_url_base(), endpoint)", "def get_api_path(self):\n return self._get_api_base() + '/' + self._get_resource_suffix()", "def sentera_api_url(self, path):\n return \"{}{}\".format(self.config[\"sentera_api_url\"], path)", "def root_url(self) -> str:\n return self.root_hartree.har.root_url", "def api_url(self, command: str) -> str:\n base_url = self.base_url\n path = \"/\".join(x for x in 
f\"{base_url.path}/api/v2\".split(\"/\") if x != \"\")\n return URL.build(\n scheme=base_url.scheme,\n host=base_url.host,\n port=base_url.port,\n path=f\"/{path}\",\n query={\"apikey\": self.api_token, \"cmd\": command},\n ).human_repr()", "def base_url(self) -> str:\n return self._base_url", "def base_url(self):\n return \"https://api.byte-stack.net\" if self.use_sandbox \\\n else \"https://api.ovo.id\"", "def url(vmanage_host,vmanage_port,api):\r\n \"\"\" function to get the url provide api endpoint \"\"\"\r\n \r\n return f\"https://{vmanage_host}:{vmanage_port}{api}\"", "async def _landing_url(self, responses: SourceResponses) -> URL:\n return URL((await responses[0].json())[\"url\"] if responses else \"https://trello.com\")", "def form_api_path(self, request_host: RequestHost, api_path_prefix: str, endpoint: str) -> str:\n api_url = self.base_url(request_host)\n\n if api_path_prefix:\n api_url += api_path_prefix\n\n return f\"{api_url}{endpoint}\"", "def _url_full(self):\n return self._url_base + \"/sharing/rest\"", "def join_api_url(api_base_url, api_path):\n if api_base_url.endswith('/'):\n api_base_url = api_base_url[:-1]\n if api_path.startswith('/'):\n api_path = api_path[1:]\n\n return api_base_url + '/' + api_path", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/preciptation<br/>\"\n f\"/api/v1.0/Stations\"\n )", "def _build_url(self):\n url = BASE_URL.format(self._host, self._port)\n _LOGGER.debug(\"TOON fetch URL: %s\", url)\n return url", "def api_endpoint():\n return 'localhost'", "def _parsing_url(self, base_url):\n url2 = f\"{self.location}?apikey={self.api_key}&details=true\"\n absolute_url = urljoin(base_url, url2)\n return absolute_url", "def Home():\n return(\n f\"Hawaii Climate Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"and<br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def base_url(self) -> str | None:\n return self._base_url", "def getBuildbotURL():", "def getHomePage(self):\n return self.home_url", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def basic_url(self):\n return self.base_name + '.cloudlabs.rc.ucl.ac.uk'", "def get_base_url(self):\n return urlparse.urljoin(self.domain, self.root_path)", "def base_url(self):\n return self._base_url", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def Home():\n return (\n f\"Welcome to the Climate App<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def getBaseUrl(self):\n return self.url", "def _get_URL_base(self, request, step):\n index = request.path.find(step.slug)\n\n return request.path[:index]", "def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url", "def get_url(self, *args):\n try:\n url = '/'.join((self.base_url, ) + args)\n except TypeError:\n url = '/'.join((self.base_url, ) + args[0])\n return url.rstrip('/')", "def url_base(self):\n return 'http://%s:%d/' % (self.host, self.port)", "def GetBaseURL(self):\n base_url = self.server_base_url\n if base_url is None:\n base_url = 'http://%s:%s' % self.server_address[:2]\n\n return base_url", "def url(self) -> str:\n 
return f\"{self._get_base_url()}{self.path_extension}\"", "def get_api_url(settings: Settings) -> str:\n return _get_control(settings) \\\n .get('provider', {}).get('arguments', {}) \\\n .get('api_url', '')", "def get_url():\n if os.environ['SERVER_PORT'] == '80':\n scheme = 'http://'\n else:\n scheme = 'https://'\n host = os.environ['SERVER_NAME']\n script_name = urllib.quote(os.environ.get('SCRIPT_NAME', ''))\n path_info = urllib.quote(os.environ.get('PATH_INFO', ''))\n qs = os.environ.get('QUERY_STRING', '')\n if qs:\n qs = '?' + qs\n return scheme + host + script_name + path_info + qs", "def home():\n return (\n f\"These are the available routes:</br>\"\n f\"/api/v1.0/precipitation</br>\"\n f\"/api/v1.0/stations</br>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/< start ></br>\"\n f\"/api/v1.0/< start >/< end ></br>\"\n )", "def url(self, api_name):\n return \"https://%s/api/%s/%s/\" % (self.host, self.api_version, api_name)", "def getCompleteUrl(urlPath: str) -> str:\n return os.path.join(BASE_URL, urlPath) if urlPath else BASE_URL", "def get_api_url(self):\n\n url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \\\n self.repo, self.product)\n return url", "def weather_api_url(self, path):\n return \"{}{}\".format(self.config[\"weather_api_url\"], path)", "def get_base_uri(self, server=Server.DEFAULT):\r\n parameters = {\r\n \"base_url\": {'value': self.base_url, 'encode': False},\r\n }\r\n\r\n return APIHelper.append_url_with_template_parameters(\r\n self.environments[self.environment][server], parameters\r\n )", "def get_full_url(self, part_url):\n return BASE_URL + part_url", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs\"\n )" ]
[ "0.70845884", "0.69659054", "0.69256717", "0.69165707", "0.6896292", "0.68960935", "0.6873356", "0.68278384", "0.6792706", "0.67290175", "0.6711364", "0.6674965", "0.6651999", "0.6650969", "0.66333526", "0.65721595", "0.65439534", "0.653448", "0.6530355", "0.6518613", "0.651151", "0.6469318", "0.6435553", "0.6401837", "0.63991725", "0.6398251", "0.6398251", "0.63979685", "0.63936293", "0.63881576", "0.63869977", "0.6376707", "0.6363991", "0.63498807", "0.63283956", "0.63164514", "0.63038296", "0.62814426", "0.62730175", "0.6261802", "0.6256061", "0.625495", "0.6223046", "0.62212276", "0.6220565", "0.62032825", "0.6197107", "0.6196266", "0.61933154", "0.6172197", "0.616827", "0.6163223", "0.6156613", "0.6152854", "0.61525714", "0.6150149", "0.6134586", "0.6124526", "0.6120455", "0.61047935", "0.6101056", "0.60986894", "0.6097556", "0.60919434", "0.6083459", "0.60740846", "0.6067206", "0.60528684", "0.6025407", "0.6020927", "0.6015083", "0.60111135", "0.6006659", "0.59971017", "0.5981449", "0.5972066", "0.5969562", "0.5965292", "0.5962938", "0.5959671", "0.595301", "0.595301", "0.5940082", "0.59338266", "0.5932683", "0.59312797", "0.5930683", "0.592773", "0.592682", "0.5926238", "0.59261197", "0.59246904", "0.592321", "0.5916355", "0.59138465", "0.5898959", "0.5888477", "0.58751816", "0.5870684", "0.5870364" ]
0.6285166
37
Make sure the netcdf cc data handler operates correctly
def test_data_handling_nc_cc(): input_files = [os.path.join(TEST_DATA_DIR, 'ua_test.nc'), os.path.join(TEST_DATA_DIR, 'va_test.nc'), os.path.join(TEST_DATA_DIR, 'orog_test.nc'), os.path.join(TEST_DATA_DIR, 'zg_test.nc')] with xr.open_mfdataset(input_files) as fh: min_lat = np.min(fh.lat.values) min_lon = np.min(fh.lon.values) target = (min_lat, min_lon) plevel = fh.plev[-1] ua = np.transpose(fh['ua'][:, -1, ...].values, (1, 2, 0)) va = np.transpose(fh['va'][:, -1, ...].values, (1, 2, 0)) handler = DataHandlerNCforCC(input_files, features=['U_100m', 'V_100m'], target=target, shape=(20, 20), val_split=0.0, worker_kwargs=dict(max_workers=1)) assert handler.data.shape == (20, 20, 20, 2) handler = DataHandlerNCforCC(input_files, features=[f'U_{int(plevel)}pa', f'V_{int(plevel)}pa'], target=target, shape=(20, 20), val_split=0.0, worker_kwargs=dict(max_workers=1)) if handler.invert_lat: handler.data = handler.data[::-1] assert handler.data.shape == (20, 20, 20, 2) assert np.allclose(ua, handler.data[..., 0]) assert np.allclose(va, handler.data[..., 1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_data(self):\n # ================ CHECK DATA / CONNECT / SELECT ================\n N = self.xyz.shape[0]\n # Chech array :\n if (self.connect.shape != (N, N)) or not isinstance(self.connect,\n np.ndarray):\n raise ValueError(\"c_connect must be an array of \"\n \"shape \" + str((N, N)))\n if self.select is None:\n self.select = np.ones_like(self.connect)\n if (self.select.shape != (N, N) or not isinstance(self.select,\n np.ndarray)):\n raise ValueError(\"c_select must be an array of \"\n \"shape \" + str((N, N)))\n # Mask c_connect :\n try:\n self.connect.mask\n except:\n self.connect = np.ma.masked_array(self.connect, mask=True)\n self.connect.mask[self.select.nonzero()] = False\n # Use specific color values :\n if (self.colval is not None) and isinstance(self.colval, dict):\n mask = np.ones_like(self.connect.mask)\n for k, v in zip(self.colval.keys(), self.colval.values()):\n mask[self.connect.data == k] = False\n self.colval[k] = color2vb(v)\n self.connect.mask = mask\n\n # ================ CHECK COLOR ================\n # Check colorby :\n if self.colorby not in ['count', 'strength', 'density']:\n raise ValueError(\"The c_colorby parameter must be 'count', \"\n \"'strength' or 'density'\")\n # Test dynamic :\n if (self.dynamic is not None) and not isinstance(self.dynamic, tuple):\n raise ValueError(\"c_dynamic bust be a tuple\")\n\n # ================ NON-ZERO INDICES ================\n # Find where there is non-masked connections :\n self._nnz_x, self._nnz_y = np.where(~self.connect.mask)\n self._indices = np.c_[self._nnz_x, self._nnz_y].flatten()\n self._Nindices = np.arange(len(self._indices))\n # Build position array :\n self.a_position = np.zeros((2*len(self._nnz_x), 3), dtype=np.float32)\n self.a_position[self._Nindices, :] = self.xyz[self._indices, :]", "def test_solar_cc():\n\n features = ['clearsky_ratio', 'rsds', 'clearsky_ghi']\n input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')]\n nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5')\n\n with xr.open_mfdataset(input_files) as fh:\n min_lat = np.min(fh.lat.values)\n min_lon = np.min(fh.lon.values) - 360\n target = (min_lat, min_lon)\n shape = (len(fh.lat.values), len(fh.lon.values))\n\n with pytest.raises(AssertionError):\n handler = DataHandlerNCforCC(input_files, features=features,\n target=target, shape=shape,\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n\n handler = DataHandlerNCforCC(input_files, features=features,\n nsrdb_source_fp=nsrdb_source_fp,\n target=target, shape=shape,\n temporal_slice=slice(0, 1),\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n\n cs_ratio = handler.data[..., 0]\n ghi = handler.data[..., 1]\n cs_ghi = handler.data[..., 2]\n cs_ratio_truth = ghi / cs_ghi\n\n assert cs_ratio.max() < 1\n assert cs_ratio.min() > 0\n assert (ghi < cs_ghi).all()\n assert np.allclose(cs_ratio, cs_ratio_truth)\n\n with Resource(nsrdb_source_fp) as res:\n meta = res.meta\n tree = KDTree(meta[['latitude', 'longitude']])\n cs_ghi_true = res['clearsky_ghi']\n\n # check a few sites against NSRDB source file\n for i in range(4):\n for j in range(4):\n test_coord = handler.lat_lon[i, j]\n _, inn = tree.query(test_coord)\n\n assert np.allclose(cs_ghi_true[0:48, inn].mean(), cs_ghi[i, j])", "def test_ccds(self):\n #TODO write ccds tests", "def __init__(self, datafile):\r\n super(type(self), self).__init__()\r\n self.data = None\r\n log.info(\"Trying to read netcdf \"+datafile)\r\n try:\r\n # We have to uncompress the file to a temporary file\r\n # This will be deleted on 
__del__ cleanup\r\n datafile = super(type(self), self).uncompressTempFile(datafile)\r\n self.data = Scientific.IO.NetCDF.NetCDFFile(datafile, \"r\")\r\n except BaseException as e:\r\n log.error(\"Couldn't read netcdf data from file \"+datafile)\r\n log.error(\"Exception {0}\".format(e))", "def read_netcdf(self,filename):", "def sanity_check_step(self):\n\n incs = [\"netcdf.h\"]\n libs = [\"libnetcdf.so\", \"libnetcdf.a\"]\n # since v4.2, the non-C libraries have been split off in seperate extensions_step\n # see netCDF-Fortran and netCDF-C++\n if LooseVersion(self.version) < LooseVersion(\"4.2\"):\n incs += [\"netcdf%s\" % x for x in [\"cpp.h\", \".hh\", \".inc\", \".mod\"]] + \\\n [\"ncvalues.h\", \"typesizes.mod\"]\n libs += [\"libnetcdf_c++.so\", \"libnetcdff.so\",\n \"libnetcdf_c++.a\", \"libnetcdff.a\"]\n\n custom_paths = {\n 'files': [\"bin/nc%s\" % x for x in [\"-config\", \"copy\", \"dump\",\n \"gen\", \"gen3\"]] +\n [\"lib/%s\" % x for x in libs] +\n [\"include/%s\" % x for x in incs],\n 'dirs': []\n }\n\n super(EB_netCDF, self).sanity_check_step(custom_paths=custom_paths)", "def __init__(self):\n self.datasets = [\"ISCCP\",\"ISCCP_raw\",\"PATMOSX\",\"PATMOSX_raw\"]\n f = cdms.open(\"OBS/clt_ISCCP_corrected_198301-200912.nc\")\n fp = cdms.open(\"OBS/clt_PATMOSX_corrected_198301-200912.nc\")\n \n f_old = cdms.open(\"OBS/clt_ISCCP_198307-200806.nc\")\n fp_old = cdms.open(\"OBS/clt_PATMOSX_198200-200912.nc\")\n\n fgpcp = cdms.open(\"OBS/GPCP.precip.mon.mean.nc\")\n fcmap = cdms.open(\"OBS/CMAP.std.precip.mon.mean.nc\")\n \n \n self.ISCCP = f(\"clt\",time=('1984-1-1','2009-12-31'))\n self.ISCCP = MV.masked_where(np.isnan(self.ISCCP),self.ISCCP)\n cdutil.setTimeBoundsMonthly(self.ISCCP)\n\n self.PATMOSX = fp(\"clt\",time=('1984-1-1','2009-12-31'))\n self.PATMOSX = MV.masked_where(np.isnan(self.PATMOSX),self.PATMOSX)\n cdutil.setTimeBoundsMonthly(self.PATMOSX)\n\n self.ISCCP_raw = f_old(\"clt\",time=('1984-1-1','2008-6-31'))\n self.ISCCP_raw = MV.masked_where(np.isnan(self.ISCCP_raw),self.ISCCP_raw)\n cdutil.setTimeBoundsMonthly(self.ISCCP_raw)\n\n self.PATMOSX_raw = fp_old(\"clt\",time=('1982-1-1','2009-12-31'))\n self.PATMOSX_raw = MV.masked_where(np.isnan(self.PATMOSX_raw),self.PATMOSX_raw)\n cdutil.setTimeBoundsMonthly(self.PATMOSX_raw)\n\n self.GPCP = cdutil.averager(fgpcp(\"precip\",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')\n cdutil.setTimeBoundsMonthly(self.GPCP)\n self.CMAP = cdutil.averager(fcmap(\"precip\",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')\n self.CMAP.setAxis(0,self.GPCP.getTime())\n cdutil.setTimeBoundsMonthly(self.CMAP)", "def have_cdc() -> bool:", "def __init__(self, datafile):\r\n super(type(self), self).__init__()\r\n log.info(\"Trying to read netcdf \"+datafile)\r\n try:\r\n # We have to uncompress the file to a temporary file\r\n # This will be deleted on __del__ cleanup\r\n datafile = \\\r\n netcdf_reader.netcdfReader.uncompressTempFile(self, datafile) \r\n self.data = arcpy.NetCDFFileProperties(datafile)\r\n except BaseException as e:\r\n log.error(\"Couldn't read netcdf data from file \"+datafile)\r\n log.error(\"Exception {0}\".format(e))\r\n \r\n def __del__(self):\r\n # Close our file before calling superclass, since superclass might delete it\r\n if self.data:\r\n self.data.close()\r\n super(type(self), self).__del__()", "def write_netcdf(file,xc,xc_bnd,yc,yc_bnd,times,hydrographs,fractions,loc,Flist,velocity,diffusion,NODATA,verbose):\n \n f = Dataset(file,'w', format='NETCDF4')\n\n # set dimensions\n time = 
f.createDimension('time', None)\n x = f.createDimension('x',xc.shape[1])\n y = f.createDimension('y',xc.shape[0])\n nv4 = f.createDimension('nv4',4)\n\n # initialize variables\n time = f.createVariable('time','f8',('time',))\n xcs = f.createVariable('xc','f8',('y','x',))\n ycs = f.createVariable('yc','f8',('y','x',))\n xc_bnds = f.createVariable('xc_bnds','f8',('y','x','nv4',))\n yc_bnds = f.createVariable('yc_bnds','f8',('y','x','nv4',))\n fraction = f.createVariable('fraction','f8',('y','x',),fill_value=NODATA)\n UHS = f.createVariable('unit_hydrograph','f8',('time','y','x',),fill_value=NODATA)\n\n # write attributes for netcdf\n f.description = 'Aggregated UH_S and Fraction Vars for full RASM domain'\n f.history = 'Created: {}\\n'.format(tm.ctime(tm.time()))\n f.history += ' '.join(sys.argv) + '\\n'\n f.source = sys.argv[0] # prints the name of script used\n f.velocity = velocity\n f.diffusion = diffusion\n f.outlet_lon = loc[0]\n f.outlet_lat = loc[1]\n f.includes = str(len(Flist))+' files'\n\n ycs.long_name = 'latitude of grid cell center'\n ycs.standard_name = 'latitude'\n ycs.units = 'degrees_north'\n ycs._CoordinateAxisType = 'Lat'\n ycs.bounds = 'yc_bnds'\n\n xcs.long_name = 'longitude of grid cell center'\n xcs.standard_name = 'longitude'\n xcs.units = 'degrees_east'\n xcs._CoordinateAxisType = 'Lon'\n xcs.bounds = 'xc_bnds'\n\n time.standard_name = 'time'\n time.units = 'seconds'\n time.description = 'Seconds since initial impulse'\n time.calendar = 'proleptic_gregorian'\n\n UHS.units = 'unitless'\n UHS.description = 'unit hydrograph for each grid cell with respect to basin outlet location'\n \n fraction.units = 'unitless'\n fraction.description = 'fraction of grid cell contributing to guage location'\n\n # write data to variables initialized above\n time[:]= times\n xcs[:,:] = xc\n ycs[:,:] = yc\n xc_bnds[:,:,:] = xc_bnd\n yc_bnds[:,:,:] = yc_bnd\n UHS[:,:,:] = hydrographs\n fraction[:,:]= fractions\n f.close()\n\n return", "def inputs_netCDF(ID, fname, data):\n\n from netCDF4 import Dataset #, date2num, num2date\n from datetime import datetime\n\n print('**** creating SpaFHy input netCDF4 file: ' + fname + ' ****')\n \n # create dataset & dimensions\n ncf = Dataset(fname, 'w')\n ncf.description = 'SpatialData from : ' + str(ID)\n ncf.history = 'created ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n ncf.source = 'SpaFHy v.1.0 inputs'\n \n dlat, dlon = np.shape(data['cmask'])\n\n ncf.createDimension('dlon', int(dlon))\n ncf.createDimension('dlat', int(dlat))\n ncf.createDimension('scalar', 1)\n\n # create variables \n # call as createVariable(varname,type,(dimensions))\n cellsize = ncf.createVariable('cellsize', 'f4', ('scalar',))\n cellsize.units = 'm'\n lat = ncf.createVariable('lat', 'f4', ('dlat',))\n lat.units = 'ETRS-TM35FIN'\n lon = ncf.createVariable('lon', 'f4', ('dlon',))\n lon.units = 'ETRS-TM35FIN'\n\n cellsize[0] = data['cellsize']\n lon[:] = data['lon0']\n lat[:] = data['lat0']\n \n # required inputs\n cmask = ncf.createVariable('cmask', 'i4', ('dlat','dlon',))\n cmask.units = 'integer inside catchment, Nan outside'\n LAI_conif = ncf.createVariable('LAI_conif', 'f4', ('dlat','dlon',))\n LAI_conif.units = 'conifer LAI (m2m-2)'\n LAI_decid = ncf.createVariable('LAI_decid', 'f4', ('dlat','dlon',))\n LAI_decid.units = 'deciduous annual max LAI (m2m-2)' \n hc = ncf.createVariable('hc', 'f4', ('dlat','dlon',))\n hc.units = 'canopy height m' \n cf = ncf.createVariable('cf', 'f4', ('dlat','dlon',))\n cf.units = 'canopy closure (-)' \n \n soilclass = 
ncf.createVariable('soilclass', 'i4', ('dlat','dlon',))\n soilclass.units = 'soil class (1 - 5)'\n \n flowacc = ncf.createVariable('flowacc', 'f4', ('dlat','dlon',))\n flowacc.units = 'flow accumualtion area m2'\n slope = ncf.createVariable('slope', 'f4', ('dlat','dlon',))\n slope.units = 'local slope (deg)' \n \n for k in ['LAI_conif', 'LAI_decid', 'hc', 'cf', 'soilclass', 'flowacc', 'slope']:\n ncf[k][:,:] = data[k]\n \n print('**** done ****')", "def _initialize_output(self, time_len, id_len):\r\n\r\n log('Initializing new file %s' % self.cf_compliant_file, 'INFO')\r\n \r\n self.cf_nc = Dataset(self.cf_compliant_file, 'w', format='NETCDF3_CLASSIC')\r\n \r\n # Create global attributes\r\n log(' globals', 'DEBUG', self.print_debug)\r\n self.cf_nc.featureType = 'timeSeries'\r\n self.cf_nc.Metadata_Conventions = 'Unidata Dataset Discovery v1.0'\r\n self.cf_nc.Conventions = 'CF-1.6'\r\n self.cf_nc.cdm_data_type = 'Station'\r\n self.cf_nc.nodc_template_version = (\r\n 'NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1')\r\n self.cf_nc.standard_name_vocabulary = ('NetCDF Climate and Forecast (CF) ' +\r\n 'Metadata Convention Standard Name ' +\r\n 'Table v28')\r\n self.cf_nc.title = 'RAPID Result'\r\n self.cf_nc.summary = (\"Results of RAPID river routing simulation. Each river \" +\r\n \"reach (i.e., feature) is represented by a point \" +\r\n \"feature at its midpoint, and is identified by the \" +\r\n \"reach's unique NHDPlus COMID identifier.\")\r\n self.cf_nc.time_coverage_resolution = 'point'\r\n self.cf_nc.geospatial_lat_min = 0.0\r\n self.cf_nc.geospatial_lat_max = 0.0\r\n self.cf_nc.geospatial_lat_units = 'degrees_north'\r\n self.cf_nc.geospatial_lat_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_lon_min = 0.0\r\n self.cf_nc.geospatial_lon_max = 0.0\r\n self.cf_nc.geospatial_lon_units = 'degrees_east'\r\n self.cf_nc.geospatial_lon_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_vertical_min = 0.0\r\n self.cf_nc.geospatial_vertical_max = 0.0\r\n self.cf_nc.geospatial_vertical_units = 'm'\r\n self.cf_nc.geospatial_vertical_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_vertical_positive = 'up'\r\n self.cf_nc.project = self.project_name\r\n self.cf_nc.processing_level = 'Raw simulation result'\r\n self.cf_nc.keywords_vocabulary = ('NASA/Global Change Master Directory ' +\r\n '(GCMD) Earth Science Keywords. 
Version ' +\r\n '8.0.0.0.0')\r\n self.cf_nc.keywords = 'DISCHARGE/FLOW'\r\n self.cf_nc.comment = 'Result time step(s) (seconds): ' + str(self.time_step_array)\r\n \r\n timestamp = datetime.utcnow().isoformat() + 'Z'\r\n self.cf_nc.date_created = timestamp\r\n self.cf_nc.history = (timestamp + '; added time, lat, lon, z, crs variables; ' +\r\n 'added metadata to conform to NODC_NetCDF_TimeSeries_' +\r\n 'Orthogonal_Template_v1.1')\r\n \r\n # Create dimensions\r\n log(' dimming', 'DEBUG', self.print_debug)\r\n self.cf_nc.createDimension('time', time_len)\r\n self.cf_nc.createDimension(self.output_id_dim_name, id_len)\r\n \r\n # Create variables\r\n log(' timeSeries_var', 'DEBUG', self.print_debug)\r\n timeSeries_var = self.cf_nc.createVariable(self.output_id_dim_name, 'i4', \r\n (self.output_id_dim_name,))\r\n timeSeries_var.long_name = (\r\n 'Unique NHDPlus COMID identifier for each river reach feature')\r\n timeSeries_var.cf_role = 'timeseries_id'\r\n \r\n log(' time_var', 'DEBUG', self.print_debug)\r\n time_var = self.cf_nc.createVariable('time', 'i4', ('time',))\r\n time_var.long_name = 'time'\r\n time_var.standard_name = 'time'\r\n time_var.units = 'seconds since 1970-01-01 00:00:00 0:00'\r\n time_var.axis = 'T'\r\n \r\n #only add if user adds\r\n if self.comid_lat_lon_z_file and os.path.exists(self.comid_lat_lon_z_file):\r\n log(' lat_var', 'DEBUG', self.print_debug)\r\n lat_var = self.cf_nc.createVariable('lat', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n lat_var.long_name = 'latitude'\r\n lat_var.standard_name = 'latitude'\r\n lat_var.units = 'degrees_north'\r\n lat_var.axis = 'Y'\r\n \r\n log(' lon_var', 'DEBUG', self.print_debug)\r\n lon_var = self.cf_nc.createVariable('lon', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n lon_var.long_name = 'longitude'\r\n lon_var.standard_name = 'longitude'\r\n lon_var.units = 'degrees_east'\r\n lon_var.axis = 'X'\r\n \r\n log(' z_var', 'DEBUG', self.print_debug)\r\n z_var = self.cf_nc.createVariable('z', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n z_var.long_name = ('Elevation referenced to the North American ' +\r\n 'Vertical Datum of 1988 (NAVD88)')\r\n z_var.standard_name = 'surface_altitude'\r\n z_var.units = 'm'\r\n z_var.axis = 'Z'\r\n z_var.positive = 'up'\r\n \r\n log(' crs_var', 'DEBUG', self.print_debug)\r\n crs_var = self.cf_nc.createVariable('crs', 'i4')\r\n crs_var.grid_mapping_name = 'latitude_longitude'\r\n crs_var.epsg_code = 'EPSG:4326' # WGS 84\r\n crs_var.semi_major_axis = 6378137.0\r\n crs_var.inverse_flattening = 298.257223563", "def save_ecco_dataset_to_netcdf(ecco_ds,\n output_dir,\n dataset_name = 'by_variable',\n time_method = 'by_record',\n output_array_precision = np.float32,\n output_freq_code=None):\n\n\n # Create a name of the files if not specified\n # ---------------------------------------------\n if dataset_name =='by_variable':\n # concat all data variables together into a single string\n dataset_name = '_'.join(list(ecco_ds.data_vars))\n\n\n # force load coordinate values in case they are in dask array\n # -----------------------------------------------------------\n for coord in ecco_ds.coords:\n ecco_ds[coord].load()\n\n\n # Define fill values for NaN\n # ---------------------------------------------\n if output_array_precision == np.float32:\n netcdf_fill_value = nc4.default_fillvals['f4']\n\n elif output_array_precision == np.float64:\n netcdf_fill_value = nc4.default_fillvals['f8']\n\n\n # Create NetCDF encoding directives\n # 
---------------------------------------------\n print('\\n... creating variable encodings')\n # ... data variable encoding directives\n dv_encoding = dict()\n for dv in ecco_ds.data_vars:\n dv_encoding[dv] = {'zlib':True, \\\n 'complevel':5,\\\n 'shuffle':True,\\\n '_FillValue':netcdf_fill_value}\n\n # ... coordinate encoding directives\n print('\\n... creating coordinate encodings')\n coord_encoding = dict()\n for coord in ecco_ds.coords:\n # set default no fill value for coordinate\n if output_array_precision == np.float32:\n coord_encoding[coord] = {'_FillValue':None, 'dtype':'float32'}\n elif output_array_precision == np.float64:\n coord_encoding[coord] = {'_FillValue':None, 'dtype':'float64'}\n\n # force 64 bit ints to be 32 bit ints\n if (ecco_ds[coord].values.dtype == np.int32) or \\\n (ecco_ds[coord].values.dtype == np.int64) :\n coord_encoding[coord]['dtype'] ='int32'\n\n # fix encoding of time\n if coord == 'time' or coord == 'time_bnds':\n coord_encoding[coord]['dtype'] ='int32'\n\n if 'units' in ecco_ds[coord].attrs:\n # apply units as encoding for time\n coord_encoding[coord]['units'] = ecco_ds[coord].attrs['units']\n # delete from the attributes list\n del ecco_ds[coord].attrs['units']\n\n elif coord == 'time_step':\n coord_encoding[coord]['dtype'] ='int32'\n\n # ... combined data variable and coordinate encoding directives\n encoding = {**dv_encoding, **coord_encoding}\n\n\n # Create directory for output files\n # ---------------------------------------------\n filepath = output_dir / dataset_name\n\n if not filepath.exists():\n filepath.mkdir(parents=True, exist_ok=True)\n\n\n # Determine output freqency code.\n # ---------------------------------------------\n # user can specify directory or it can be found if the dataset\n # has the 'time_coverage_resolution' global attribute\n if output_freq_code == None:\n if 'time_coverage_resolution' in ecco_ds.attrs:\n\n print('dataset time averaging from metadata')\n time_coverage_resolution = ecco_ds.attrs['time_coverage_resolution']\n if time_coverage_resolution == 'P1M':\n output_freq_code='AVG_MON'\n elif time_coverage_resolution == 'P1D':\n output_freq_code='AVG_DAY'\n elif time_coverage_resolution == 'P0S':\n output_freq_code='SNAP'\n else:\n print('output_freq_code not defined and not available in dataset metadata')\n print('... using full record time in filename')\n\n\n # Write records to disk as NetCDF\n # ---------------------------------------------\n # one file per time level\n\n if time_method == 'by_record':\n for time_i, rec_time in enumerate(ecco_ds.time):\n\n cur_ds = ecco_ds.isel(time=time_i)\n\n # cast data variables to desired precision (if necessary)\n #for data_var in cur_ds.data_vars:\n # if cur_ds[data_var].values.dtype != output_array_precision:\n # cur_ds[data_var].values = cur_ds[data_var].astype(output_array_precision)\n\n time_date_info =\\\n make_date_str_from_dt64(cur_ds.time.values, output_freq_code)\n\n # sort comments alphabetically\n print('\\n... sorting global attributes')\n cur_ds.attrs = sort_attrs(cur_ds.attrs)\n\n # add one final comment (PODAAC request)\n cur_ds.attrs[\"coordinates_comment\"] = \\\n \"Note: the global 'coordinates' attribute descibes auxillary coordinates.\"\n\n fname = dataset_name + '_' + time_date_info['short'] +\\\n '_' + time_date_info['ppp_tttt'] + '.nc'\n\n print(fname)\n print(cur_ds)\n netcdf_output_filename = filepath / fname\n\n # SAVE\n print('\\n... 
saving to netcdf ', netcdf_output_filename)\n cur_ds.to_netcdf(netcdf_output_filename, encoding=encoding)\n cur_ds.close()\n\n # one file per year\n elif time_method == 'by_year':\n unique_years = np.unique(ecco_ds.time.dt.year)\n print(unique_years)\n\n for year in unique_years:\n # pull out only records for this year\n cur_ds = ecco_ds.sel(time=slice(str(year), str(year)))\n\n first_time = cur_ds.time.values[0]\n last_time = cur_ds.time.values[-1]\n\n first_time_date_info =\\\n make_date_str_from_dt64(first_time, output_freq_code)\n\n last_time_date_info =\\\n make_date_str_from_dt64(last_time, output_freq_code)\n\n # sort comments alphabetically\n print('\\n... sorting global attributes')\n cur_ds.attrs = sort_attrs(cur_ds.attrs)\n\n # add one final comment (PODAAC request)\n cur_ds.attrs[\"coordinates_comment\"] = \\\n \"Note: the global 'coordinates' attribute descibes auxillary coordinates.\"\n\n fname = dataset_name + '_' +\\\n first_time_date_info['short'] + '_' +\\\n last_time_date_info['short'] + '_' +\\\n first_time_date_info['ppp_tttt']+ '.nc'\n\n print(fname)\n print(cur_ds)\n netcdf_output_filename = filepath / fname\n\n # SAVE\n print('\\n... saving to netcdf ', netcdf_output_filename)\n cur_ds.to_netcdf(netcdf_output_filename, encoding=encoding)\n cur_ds.close()", "def runTest(self):\n ncfile = netCDF4.Dataset(URL)\n assert varname in ncfile.variables.keys()\n var = ncfile.variables[varname]\n assert var.shape == varshape\n data = var[:]\n assert_array_almost_equal(data.min(),varmin)\n assert_array_almost_equal(data.max(),varmax)\n ncfile.close()\n # test https support (linked curl lib must built with openssl support)\n ncfile = netCDF4.Dataset(URL_https)\n assert(ncfile['sst'].long_name=='Sea Surface Temperature') \n ncfile.close()", "def _write_nc(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n from netCDF4 import Dataset\n grid_nc = Dataset(FN, 'w', format='NETCDF4')\n grid_nc.createDimension('one', 1)\n grid_nc.createDimension('n_cartesian', 3)\n grid_nc.createDimension('n_points', n_points)\n grid_nc.createVariable('origin', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('counts', 'i8', ('one', 'n_cartesian'))\n grid_nc.createVariable('spacing', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('vals', 'f8', ('one', 'n_points'), zlib=True)\n for key in data.keys():\n grid_nc.variables[key][:] = data[key]\n grid_nc.close()", "def runTest(self):\n nc = Dataset(self.file)\n data = nc['vl'][-1]\n # check max error of compression\n err = np.abs(data - self.data)\n assert(err.max() < nc['vl'].scale_factor)\n # turn off auto-scaling\n nc.set_auto_maskandscale(False)\n data = nc['vl'][-1]\n assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))\n nc.close()", "def execute(self, in_nc, in_weight_table, out_nc, grid_name, conversion_flag, in_time_interval=\"6hr\"): # modified this line CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n\r\n # Validate the netcdf dataset\r\n vars_oi_index = self.dataValidation(in_nc)\r\n \r\n \"\"\"get conversion factor the flag is used to differentiate forecasts converted \r\n to netCDF from GRIB and the original netCDF. 
They both use the same weight tables\r\n but the original netCDF is in mm whereas the stock GRIB forecasts are in meters.\r\n Set the conversion_flag in the run.py configuration file.\r\n \"\"\"\r\n if conversion_flag: # Line Added CJB 20190218\r\n conversion_factor = 1.0 #Line Modified CJB 20190218\r\n elif grid_name == 'ecmwf_t1279' or grid_name == 'ecmwf_tco639': # Line Modified CJB 20190218\r\n #if grid_name == 'ecmwf_HRES_F' or grid_name == 'ecmwf_ENS_F': # Line Added/Modified CJB 20190108\r\n #new grids in mm instead of m\r\n conversion_factor = 0.001\r\n else: #set the conversion factor to 1 for everything else (data is in m but legacy installations do not have a flag) Line Added CJB 20190218\r\n conversion_factor = 1.0 # Line Added CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n # identify if the input netcdf data is the High Resolution data with three different time intervals\r\n id_data = self.dataIdentify(in_nc)\r\n if id_data is None:\r\n raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the netcdf dataset'''\r\n data_in_nc = NET.Dataset(in_nc)\r\n time = data_in_nc.variables['time'][:]\r\n\r\n # Check the size of time variable in the netcdf data\r\n if len(time) == 0: # *** MJS This change seems like it is too loose an error trap; should it account for instances when nc file time var is != in length with id_data lenght?\r\n raise Exception(self.errorMessages[3])\r\n #if len(time) != self.length_time[id_data]:\r\n # raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the weight table '''\r\n print(\"Reading the weight table...\", in_weight_table)\r\n dict_list = {self.header_wt[0]:[], self.header_wt[1]:[], self.header_wt[2]:[],\r\n self.header_wt[3]:[], self.header_wt[4]:[]}\r\n\r\n with open(in_weight_table, \"r\") as csvfile:\r\n reader = csv.reader(csvfile)\r\n count = 0\r\n for row in reader:\r\n if count == 0:\r\n #check number of columns in the weight table\r\n if len(row) < len(self.header_wt):\r\n raise Exception(self.errorMessages[4])\r\n #check header\r\n if row[1:len(self.header_wt)] != self.header_wt[1:]:\r\n raise Exception(self.errorMessages[5])\r\n count += 1\r\n else:\r\n for i in range(len(self.header_wt)):\r\n dict_list[self.header_wt[i]].append(row[i])\r\n count += 1\r\n\r\n ''' Calculate water inflows\r\n as a reminder, the first 91 time steps are T=0 to T=90 and are 1-hourly for HRES\r\n\t\t the next 18 time steps for HRES are T=93 to T=144 at 3-hourly\r\n then the final 16 time steps are T=150 to T=240 at 6-hourly for a total of 125 records\r\n\t\t\tFor ENS, the first 49 time steps are T=0 to T=144 at 3-hourly\r\n\t\t\tthe final 35 time steps are T=150 to T=360 at 6-hourly for a total of 84 records\r\n '''\r\n\t\t\t\r\n print(\"Calculating water inflows...\")\r\n\t\t\r\n ''' \r\n added the next section CJB 20180122 \r\n '''\r\n\r\n\t\t# Get the overall number of time steps\r\n size_time = self.getTimeSize(in_nc) #CJB 20180122\r\n # Determine the size of time steps in each group (1-hourly, 3-hourly, and/or 6-hourly)\r\n if id_data == \"HRES1\": # T <= 90 \r\n time_size = (size_time - 1)\r\n elif id_data == \"HRES13\": # 93 <= T <= 144\r\n if in_time_interval == \"1hr\":\r\n time_size = self.length_time_opt[\"HighRes-1hr\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - 1)\r\n elif id_data == \"HRES136\": # 150 <= T <= 240\r\n if in_time_interval == \"1hr\":\r\n time_size = 
self.length_time_opt[\"HighRes-1hr\"]\r\n elif in_time_interval == \"3hr\": # MJS Doesn't seem to be a case used currently, but added just in case later need.\r\n time_size = self.length_time_opt[\"HighRes-3hr-sub\"] # MJS This is HRES136, i.e., if for some reason in ecmwf_rapid_multi a 3 hr is asked for for this case, it should still have the 3hr_sub number of times\r\n elif in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - self.length_time_opt[\"HighRes-3hr-Sub\"] - 1)\r\n elif id_data == \"ENS3\": # T <= 144\r\n time_size = (size_time - 1)\r\n elif id_data == \"ENS36\": # 150 <= T <= 360\r\n if in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"LowResFull-3hr-Sub\"] - 1)\r\n else: # id_data == \"ENS6\": # T <= 360 but all 6-hourly\r\n time_size = (size_time - 1)\r\n #else: # something is wrong and need to throw an error message - likely a corrupt forecast file\r\n # raise Exception(self.errorMessages[3])\r\n #''' end of added section CJB 20180122 \r\n #'''\r\n\r\n #if id_data == \"LowRes\":\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #elif id_data == \"Low3HrRes\":\r\n # size_time = self.length_time_opt[\"LowRes-3hr\"]\r\n #elif id_data == \"LowResFull\":\r\n # if in_time_interval == \"3hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #else: #HighRes\r\n # if in_time_interval == \"1hr\":\r\n # size_time = self.length_time_opt[\"HighRes-1hr\"]\r\n # elif in_time_interval == \"3hr\":\r\n # size_time = self.length_time_opt[\"HighRes-3hr\"]\r\n # elif in_time_interval == \"3hr_subset\":\r\n # size_time = self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"HighRes-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"HighRes-6hr\"]\r\n\r\n size_streamID = len(set(dict_list[self.header_wt[0]]))\r\n\r\n # Create output inflow netcdf data\r\n # data_out_nc = NET.Dataset(out_nc, \"w\") # by default format = \"NETCDF4\"\r\n data_out_nc = NET.Dataset(out_nc, \"w\", format = \"NETCDF3_CLASSIC\")\r\n #dim_Time = data_out_nc.createDimension('Time', size_time)\r\n dim_Time = data_out_nc.createDimension('Time', time_size)\r\n dim_RiverID = data_out_nc.createDimension('rivid', size_streamID)\r\n var_m3_riv = data_out_nc.createVariable('m3_riv', 'f4', \r\n ('Time', 'rivid'),\r\n fill_value=0)\r\n \r\n #data_temp = NUM.empty(shape = [size_time, size_streamID])\r\n data_temp = NUM.empty(shape = [time_size, size_streamID])\r\n\r\n lon_ind_all = [int(i) for i in dict_list[self.header_wt[2]]]\r\n lat_ind_all = [int(j) for j in dict_list[self.header_wt[3]]]\r\n\r\n # Obtain a subset of runoff data based on the indices in the weight table\r\n min_lon_ind_all = min(lon_ind_all)\r\n max_lon_ind_all = max(lon_ind_all)\r\n min_lat_ind_all = min(lat_ind_all)\r\n max_lat_ind_all = max(lat_ind_all)\r\n\r\n # self.vars_oi[vars_oi_index][3] = RO; get that variable's 3D structure (time, lat_index, lon_index) ready to reshape into 2D (time, lat_index x lon_index)\r\n data_subset_all = data_in_nc.variables[self.vars_oi[vars_oi_index][3]][:, min_lat_ind_all:max_lat_ind_all+1, 
min_lon_ind_all:max_lon_ind_all+1]\r\n len_time_subset_all = data_subset_all.shape[0]\r\n len_lat_subset_all = data_subset_all.shape[1]\r\n len_lon_subset_all = data_subset_all.shape[2]\r\n data_subset_all = data_subset_all.reshape(len_time_subset_all, (len_lat_subset_all * len_lon_subset_all))\r\n\r\n # compute new indices based on the data_subset_all\r\n index_new = []\r\n for r in range(0,count-1):\r\n ind_lat_orig = lat_ind_all[r]\r\n ind_lon_orig = lon_ind_all[r]\r\n index_new.append((ind_lat_orig - min_lat_ind_all)*len_lon_subset_all + (ind_lon_orig - min_lon_ind_all))\r\n\r\n # obtain a new subset of data\r\n data_subset_new = data_subset_all[:,index_new]*conversion_factor\r\n\r\n # start compute inflow\r\n pointer = 0\r\n for s in range(0, size_streamID):\r\n npoints = int(dict_list[self.header_wt[4]][pointer])\r\n # Check if all npoints points correspond to the same streamID\r\n if len(set(dict_list[self.header_wt[0]][pointer : (pointer + npoints)])) != 1:\r\n print(\"ROW INDEX {0}\".format(pointer))\r\n print(\"RIVID {0}\".format(dict_list[self.header_wt[0]][pointer]))\r\n raise Exception(self.errorMessages[2])\r\n\r\n area_sqm_npoints = [float(k) for k in dict_list[self.header_wt[1]][pointer : (pointer + npoints)]]\r\n area_sqm_npoints = NUM.array(area_sqm_npoints)\r\n area_sqm_npoints = area_sqm_npoints.reshape(1, npoints)\r\n data_goal = data_subset_new[:, pointer:(pointer + npoints)]\r\n \r\n \r\n #remove noise from data\r\n data_goal[data_goal<=0.00001] = 0\r\n\r\n ''' IMPORTANT NOTE: runoff variable in ECMWF dataset is cumulative instead of incremental through time\r\n '''\r\n # For data with Low Resolution, there's only one time interval 6 hrs\r\n if id_data == \"ENS6\": # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints\r\n elif id_data == \"ENS3\": # there's only one time interval 3 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\r\n elif id_data == \"HRES1\": # there's only one time interval 1 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\t\r\n #For data with the full version of Low Resolution, from Hour 0 to 144 (the first 49 time points) are of 3 hr time interval,\r\n # then from Hour 144 to 360 (36 time points) are of 6 hour time interval\r\n elif id_data == \"ENS36\": # Line Added/Modified CJB 20190108\r\n if in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n ro_stream = NUM.subtract(data_goal[1:49,], data_goal[:48,]) * area_sqm_npoints\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[49:,], data_goal[48:-1,]) * area_sqm_npoints\r\n else: #\"LowRes-6hr\"\r\n ######################################################\r\n # MJS Always assume this case will have a full ECMWF 240\r\n # hour forecast to work with. 
It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n #convert all to 6hr\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[2:49:2,], data_goal[:48:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[49:,], data_goal[48:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b]) * area_sqm_npoints\r\n #For data with High Resolution, from Hour 0 to 90 (the first 91 time points) are of 1 hr time interval,\r\n # then from Hour 90 to 144 (18 time points) are of 3 hour time interval, and from Hour 144 to 240 (16 time points)\r\n # are of 6 hour time interval\r\n ##########################################################\r\n # MJS The following should handle id_data = HRES13 and HRES136\r\n ##########################################################\r\n else:\r\n if in_time_interval == \"1hr\":\r\n #ro_stream = NUM.subtract(data_goal[1:91,],data_goal[:90,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:1+time_size,],data_goal[:time_size,]) * area_sqm_npoints # Line Added/Modified CJB, MJS 20190108\r\n elif in_time_interval == \"3hr\": # MJS HRES 3hr not currently used\r\n # calculate time series of 3 hr data from 1 hr data\r\n ro_3hr_a = NUM.subtract(data_goal[3:91:3,],data_goal[:88:3,])\r\n # get the time series of 3 hr data\r\n #ro_3hr_b = NUM.subtract(data_goal[91:109,], data_goal[90:108,])\r\n ro_3hr_b = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) # MJS modified again; seems no case for this, but just in case later... Line Added/Modified CJB 20190108\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_3hr_a, ro_3hr_b]) * area_sqm_npoints\r\n elif in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n #ro_stream = NUM.subtract(data_goal[91:109,], data_goal[90:108,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) * area_sqm_npoints # MJS modified again; needs to handle HRES13 that might not have complete 3hr set... Line Added/Modified CJB 20190108\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[109:,], data_goal[108:-1,]) * area_sqm_npoints\r\n ######################################################\r\n # MJS Always assume this case will have a full ECMWF 240 \r\n # hour forecast to work with. 
It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n else: # in_time_interval == \"6hr\"\r\n #arcpy.AddMessage(\"6hr\")\r\n # calculate time series of 6 hr data from 1 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[6:91:6,], data_goal[:85:6,])\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[92:109:2,], data_goal[90:107:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_c = NUM.subtract(data_goal[109:,], data_goal[108:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b, ro_6hr_c]) * area_sqm_npoints\r\n \r\n #remove negative values\r\n ro_stream[ro_stream<0] = 0\r\n data_temp[:,s] = ro_stream.sum(axis = 1)\r\n\r\n pointer += npoints\r\n\r\n\r\n '''Write inflow data'''\r\n print(\"Writing inflow data...\")\r\n var_m3_riv[:] = data_temp\r\n # close the input and output netcdf datasets\r\n data_in_nc.close()\r\n data_out_nc.close()", "def check_netcdf_file():\n # check the model file and extract necessary information\n # must be in the argument list\n if NETCDF_FILE_NAME is None:\n print('[ERROR] the netCDF model file name is required', flush=True)\n usage_csv()\n sys.exit(1)\n\n # user may provide full path\n elif os.path.isfile(NETCDF_FILE_NAME):\n model_file_name = NETCDF_FILE_NAME\n base_file_name, ext = os.path.splitext(model_file_name)\n\n # user may place it under the data directory\n elif os.path.isfile(os.path.join(DATA_DIR, NETCDF_FILE_NAME)):\n model_file_name = os.path.join(DATA_DIR, NETCDF_FILE_NAME)\n base_file_name, ext = os.path.splitext(model_file_name)\n\n # could not find the file\n else:\n print('[ERROR] could not find the netCDF model file {}'.format(NETCDF_FILE_NAME), flush=True)\n usage_csv()\n sys.exit(1)\n\n return model_file_name, base_file_name", "def F_read_S5P_nc(self,fn,data_fields,data_fields_l2g=[]):\n from netCDF4 import Dataset\n ncid = Dataset(fn,'r')\n outp = {}\n for i in range(len(data_fields)):\n tmp = ncid[data_fields[i]]\n tmpdtype = tmp.dtype\n if not data_fields_l2g:\n varname = tmp.name\n else:\n varname = data_fields_l2g[i]\n if tmpdtype is \"str\":\n outp[varname] = tmp[:]\n else:\n outp[varname] = np.squeeze(tmp[:])\n ## scale factor already applied?! 
so confusing\n# try:\n# outp[varname] = outp[varname]*tmp.scale_factor\n# if tmp.scale_factor != 1:\n# print(varname+' has a scale_factor of '+'%s'%tmp.scale_factor)\n# except Exception:\n# #print(e)\n# print(varname+' has no scale_factor!')\n if 'time_utc' in outp.keys():\n UTC_matlab_datenum = np.zeros((len(outp['time_utc']),1),dtype=np.float64)\n for i in range(len(outp['time_utc'])):\n tmp = datetime.datetime.strptime(outp['time_utc'][i],'%Y-%m-%dT%H:%M:%S.%fZ')\n UTC_matlab_datenum[i] = (tmp.toordinal()\\\n +tmp.hour/24.\\\n +tmp.minute/1440.\\\n +tmp.second/86400.\\\n +tmp.microsecond/86400/1000000+366.)\n outp['UTC_matlab_datenum'] = np.tile(UTC_matlab_datenum,(1,outp['latc'].shape[1]))\n else: # hcho l2 does not have time_utc\n # the delta_time field of hcho fills all across track position, but ch4 is one per scanline\n if len(outp['delta_time'].shape) == 1:\n outp['delta_time'] = np.tile(outp['delta_time'][...,None],(1,outp['latc'].shape[1]))\n outp['UTC_matlab_datenum'] = (outp['time']+outp['delta_time']/1000.)/86400.+734139.\n \n outp['across_track_position'] = np.tile(np.arange(1.,outp['latc'].shape[1]+1),\\\n (outp['latc'].shape[0],1)).astype(np.int16)\n return outp", "def verify_netcdf(extents_dir, out_ncfile):\n netcdf_old=out_ncfile #'/g/data/fk4/wofs/water_f7q/extents/149_-036/LS_WATER_149_-036_1987-05-22T23-08-20.154_2014-03-28T23-47-03.171.nc'\n\n tiles = [make_tileinfo(filename) for filename in glob(os.path.join(extents_dir, '*.tif'))]\n tiles.sort(key=lambda t: t.datetime)\n\n with netCDF4.Dataset(netcdf_old) as nco:\n for i in range(0,len(tiles)):\n print nco['time'][i]\n print tiles[i]\n with rasterio.open(tiles[i].filename) as tile_data:\n print \"Any difference? \" \n print numpy.sum(nco['Data'][:,:,i])\n print numpy.sum(tile_data.read(1))\n\n print type(nco['Data'][:,:,i]), type(tile_data.read(1))\n print nco['Data'][:,:,i].shape, tile_data.read(1).shape\n \n print numpy.sum(nco['Data'][:,:,i] - tile_data.read(1)[:,:])\n #print tile_data.read(1)[0:100,0:100] \n\n #print (nco['Data'][:,:,i] == tile_data.read(1)).all()", "def is_valid_netcdf_file(nc_data):\n fname = Path(nc_data.filepath()).name\n \n start_str = fname.split(\"_\")[3][1:-1]\n start_fname = dt.datetime.strptime(\n start_str + \" UTC\",\n \"%Y%j%H%M%S %Z\",\n )\n start_fname = start_fname.replace(tzinfo=dt.timezone.utc)\n end_str = fname.split(\"_\")[4][1:-1]\n end_fname = dt.datetime.strptime(end_str + \" UTC\", \"%Y%j%H%M%S %Z\")\n end_fname = end_fname.replace(tzinfo=dt.timezone.utc)\n \n avg_fname = start_fname + (end_fname - start_fname) / 2\n \n vtime = get_valid_time(nc_data)\n if vtime is None:\n return False\n \n diff = (avg_fname - vtime).total_seconds()\n \n if diff > 60:\n return False\n \n return True", "def write_netcdf(ncinfo):\r\n\t# ========== Create new netcdf ==========\r\n\tNAME=nc.netcdf_file(ncinfo.fname,'w')\r\n\t\r\n\t# ========== Set up the Dimensions ==========\r\n\tNAME.createDimension('time', None) #Question: Shouldn't time be unlimited?\r\n\t# NAME.createDimension('lev',11)\r\n\tNAME.createDimension('lat',ncinfo.lat)\r\n\tNAME.createDimension('lon',ncinfo.lon)\r\n\t\r\n\t# ========== Setup the Variables ==========\r\n\ttime=NAME.createVariable('time',np.float64,('time',))\r\n\t# lev=NAME.createVariable('lev',np.int32,('lev',))\r\n\tlat=NAME.createVariable('lat',np.float64,('lat',))\r\n\tlon=NAME.createVariable('lon',np.float64,('lon',))\r\n\t# 
VAR=NAME.createVariable(str(VAR),np.float64,('time','lev','lat','lon'),)\r\n\tVAR=NAME.createVariable(ncinfo.var_name,np.float64,('time','lat','lon'),)\r\n\t# setting the missing value is super important for the file to be cdo readable\r\n\tsetattr(VAR,'missing_value',ncinfo.fill)\r\n\tsetattr(VAR, 'standard_name', ncinfo.var_lname) \r\n\t\r\n\t# ========== Set the units ==========\r\n\ttime.units= 'day as %Y%m%d'\r\n\t# lev.units = '-'\r\n\tlat.units = 'degrees_north'\r\n\tlon.units = 'degrees_east'\r\n\tVAR.units = ncinfo.units\r\n\r\n\t# ========== Add data ==========\r\n\t\r\n\t# creates time vector using the date_range function\r\n\t# time[:]=[t for t in date_range('20110101.5','20111231.5')] \r\n\t# lev[:]=PFT_vector\r\n\tlat[:] = ncinfo.latitudes\r\n\tlon[:] = ncinfo.longitudes\r\n\t# THis is a Bodge for singe variable data\r\n\tVAR[:] = ncinfo.data\r\n\r\n\t#Add global attributes\r\n\tNAME.description = ncinfo.description\r\n\tNAME.history = ncinfo.history\r\n\r\n\t# WHATS MISSING\r\n\t# metadata a whole bunch of metadata\r\n\t# the standard_name and long_name of the variables\r\n\r\n\t# ========== Close the netcdf ==========\r\n\tNAME.close()", "def cl_file(tmp_path):\n nc_path = os.path.join(tmp_path, 'cesm2_waccm_cl.nc')\n dataset = Dataset(nc_path, mode='w')\n dataset.createDimension('lev', size=2)\n dataset.createDimension('bnds', size=2)\n\n # Dimensional variables\n dataset.createVariable('lev', np.float64, dimensions=('lev',))\n dataset.createVariable('lev_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.variables['lev'][:] = [1.0, 2.0]\n dataset.variables['lev'].bounds = 'lev_bnds'\n dataset.variables['lev'].units = '1'\n dataset.variables['lev_bnds'][:] = [[0.5, 1.5], [1.5, 3.0]]\n dataset.variables['lev_bnds'].standard_name = (\n 'atmosphere_hybrid_sigma_pressure_coordinate')\n dataset.variables['lev_bnds'].units = '1'\n dataset.variables['lev_bnds'].formula_terms = (\n 'p0: p0 a: a_bnds b: b_bnds ps: ps')\n\n # Coordinates for derivation of pressure coordinate\n dataset.createVariable('a', np.float64, dimensions=('lev',))\n dataset.createVariable('a_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.createVariable('b', np.float64, dimensions=('lev',))\n dataset.createVariable('b_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.variables['a'][:] = [1.0, 2.0]\n dataset.variables['a'].bounds = 'a_bnds'\n dataset.variables['a_bnds'][:] = [[1.5, 0.0], [3.0, 1.5]]\n dataset.variables['b'][:] = [0.0, 1.0]\n dataset.variables['b'].bounds = 'b_bnds'\n dataset.variables['b_bnds'][:] = [[0.5, -1.0], [2.0, 0.5]]\n\n dataset.close()\n return nc_path", "def repair_netcdf(fname):\n\n\t# ========== Set the path and the file name ==========\n\t# fname = \"%s_%s_%s_r1i1p1_%s_1950_2050_%s_regrid.nc\" %(var, model, sen, units, sen)\n\tfout = \"%s_setgrid\" % (fname)\n\n\t\n\t# ========== Create a list of files to cleanup ==========\n\tcleanup = []\n\n\t# ========== Check if the file exists ==========\n\tif not os.path.isfile(fname+\".nc\"):\n\t\t# check if the file exists with a different name\n\t\traise IOError(\"WARNING: The file %s cannot be found\"% fname)\n\n\t\n\t# ========== Read longitude from NC file ==========\n\tfh = Dataset(fname+\".nc\", mode='r')\n\ttry:\n\t\tlon = fh.variables['longitude'][:]\n\texcept:\n\t\ttry:\n\t\t\tlon = fh.variables['lon'][:]\n\t\texcept:\n\t\t\tlon = fh.variables['easting'][:] #easting\n\n\n\n\n\t# ========== Create a new grid ==========\n\t# Save the current grid\n\tsubp.call(\"cdo griddes %s.nc > %sGriddes\" % (fname, fname), 
shell=True)\n\t# add the griddes to the cleanup \n\tcleanup.append(\"%sGriddes\" % fname)\n\n\t# open the current grid\n\tgfile = open(\"%sGriddes\" % fname, \"r\") \n\t# Split the lines of the grid file\n\tginfo = gfile.read().splitlines()\n\t\n\t#Some models have no lat/lon bounds, skip in this case and copy\n\t#\"regrid\" file as \"setgrid\"\n\tif not (any([n.startswith(\"xbounds\") for n in ginfo]) and \n\t\t any([n.startswith(\"ybounds\") for n in ginfo])):\n\t\tsubp.call(\"cp %s.nc %s.nc\" % (fname, fout), shell=True)\n\t\tcleanup.append(\"%s.nc\" % fname)\n\t\treturn cleanup\t\n\t\n\t# Check and see if the start is known\n\tif (\n\t\tany([n.startswith(\"xfirst\") for n in ginfo])\n\t\t) and (\n\t\tany([n.startswith(\"xinc\") for n in ginfo])\n\t\t):\n\t\taddxdet = False\n\t\t# Set the lines to be removed\n\t\tbadel = [\"xvals\", \"yvals\", \" \", \"xbounds\", \"ybounds\"]\n\telse:\n\t\taddxdet = True\n\t\t# Set the lines to be removed\n\t\tbadel = [\"xvals\", \"yvals\", \" \", \"xbounds\", \"ybounds\", \"xfirst\", \"xinc\"]\n\n\t# Create list to hold the new grid details\n\tnew_grid = []\n\n\tfor ginf in ginfo:\n\t\ttest = []\n\t\tfor be in badel:\n\t\t\tif ginf.startswith(be):\n\t\t\t\ttest.append(False)\n\t\t\telif ginf == \"#\":\n\t\t\t\ttest.append(False)\n\t\t\telse:\n\t\t\t\ttest.append(True)\n\t\t\n\t\tif all(test):\n\t\t\tnew_grid.append(ginf)\n\t# Add the additional x variables\n\tif addxdet:\n\t\t# work out the model from the fname\n\t\tmodel = fname.split(\"/\")[-2]\n\t\tnew_grid.append('xfirst = -180')\n\t\tnew_grid.append('xinc = %s' % str(\n\t\t\tfloat(lon) ))\n\t\n\n\t# Check the y values, if they are missing use the ones in the original grid file\n\tif not (any([n.startswith(\"yfirst\") for n in ginfo])):\n\t\t# print (\"Seting the y bounds\")\n\t\tvals = []\n\t\tfor glov in range(0,len(ginfo)):\n\t\t\tif ginfo[glov].startswith(\"yvals\"):\n\t\t\t\tvals.append(glov)\n\t\t\telif ginfo[glov].startswith(\"ybounds\"):\n\t\t\t\tvals.append(glov)\n\t\tif len (vals) == 2:\n\t\t\tfor yv in ginfo[vals[0]:vals[1]]:\n\t\t\t\tnew_grid.append(yv)\n\n\t\telse:\n\t\t\tprint(\"\\n\")\n\t\t\traise IndexError(\"Bounding is incorrect\")\n\n\t# Save the grid out\n\tnewgrid = save_grid(fname, new_grid)\n\tcleanup.append(newgrid)\n\n\t# ========== Set the new grid file ==========\n\t# Save the current grid\n\tsubp.call(\"cdo setgrid,%sGridFix %s.nc %s.nc\" % (fname, fname, fout), shell=True)\n\t\n\tif not os.path.isfile(\"%s.nc\" % fout):\n\t\traise IOError(\"The output file was not created, going interactive\")\n\t\n\t# ========== return the files to be removed ==========\n\tcleanup.append(\"%s.nc\" % fname)\n\treturn cleanup", "def testCC(self):\n self.assertEqual(\n self.cc,\n self.ccr.cc\n )\n\n self.assertEqual(\n None,\n self.ccr_bad.cc\n )", "def _validate_raw_nc(self):\r\n\r\n self.raw_nc_list = []\r\n total_time_len = 1 #add one for the first flow value RAPID\r\n #does not include\r\n id_len_list = []\r\n for rapid_output_file in self.rapid_output_file_list:\r\n qout_nc = RAPIDDataset(rapid_output_file)\r\n id_len_list.append(qout_nc.size_river_id)\r\n total_time_len += qout_nc.size_time\r\n self.raw_nc_list.append(qout_nc)\r\n \r\n #make sure river id lists are the same\r\n for id_len_undex in range(1, len(id_len_list)):\r\n if id_len_list[id_len_undex] != id_len_list[0]:\r\n raise Exception(\"ERROR: River ID size is different in one of the files ...\")\r\n \r\n for raw_nc_index in range(1, len(self.raw_nc_list)):\r\n if not (self.raw_nc_list[raw_nc_index].get_river_id_array() 
== self.raw_nc_list[0].get_river_id_array()).all():\r\n raise Exception(\"ERROR: River IDs are different in files ...\")\r\n\r\n return id_len_list[0], total_time_len", "def processData(self,data):\n #print 'I GOT DATA',data,[0],data[1]\n # Check for valid data (not null or empty string)\n #print '**************NOTIFICATION***************',type(_RobotCommunicator.WALL_HEADER),type(data[0])\n if data:\n #print '**************NOTIFICATION***************',type(_RobotCommunicator.WALL_HEADER),type(data[0]),_RobotCommunicator.WALL_HEADER==data[0]\n\n # Check header and assign data appropriately\n # TODO: Check length of data for validity\n #print 'Header',data[0]\n if data[0] == _RobotCommunicator.POSE_HEADER:\n self.pose = unpack(_RobotCommunicator.POSE_FORMAT,data[1:])\n elif data[0] == _RobotCommunicator.SENSOR_HEADER:\n\n #for i in range(1, len(data)-1, 2):\n index= unpack('B',data[1])\n value = unpack('?',data[2])\n # Update old values or create new sensor-value pair\n self.sensors[index[0]] = value[0]\n #print 'in csharp: ',[index,value]\n\n elif data[0] == _RobotCommunicator.WAYPOINT_HEADER:\n self.waypoints = [] # Clear old waypoints\n for i in range(1, len(data)-16, 16):\n x,y = unpack(_RobotCommunicator.WAYPOINT_FORMAT,\n data[i:i+15])\n self.waypoints.append((x,y))\n elif data[0] == _RobotCommunicator.DIRECTION_HEADER:\n self.direction = unpack(_RobotCommunicator.DIRECTION_FORMAT,\n data[1:])\n elif data[0] == _RobotCommunicator.ACTUATOR_HEADER:\n self.actuators = [] # Clear old actuator commands for i in range(1, len(data)-1):\n self.actuators.append(unpack(\n _RobotCommunicator.ACTUATOR_FORMAT,data[i]))\n elif data[0] == _RobotCommunicator.WALL_HEADER:\n self.walls = {} # Clear old wall entries\n index = unpack('B', data[1])\n x1,y1,x2,y2 = unpack(_RobotCommunicator.WALL_FORMAT,data[2:34])\n self.walls = (x1,y1,x2,y2)\n #print '**************Coordinates***************',(x1,y1,x2,y2)\n print '****self.walls*********',self.walls\n elif data[0] == _RobotCommunicator.OBS_HEADER:\n index = unpack('B', data[1])\n add,x1,y1 = unpack(_RobotCommunicator.OBS_FORMAT,data[2:26])\n #print '***********self.obs*************'+','.join(map(str,[add,x1,y1]))\n self.obs = [add,x1,round(y1,2)]\n if add == 1:\n a = PolyShapes.Rectangle(self.resolX,self.resolY)\n a.shift(x1,y1)\n self.obsPoly += a\n self.receiveObs = True\n #print \"add obstacle:\" + str(x1) + \",\"+ str(y1)\n elif add == 4:\n if x1 == 0:\n self.STOP = True\n else:\n self.STOP = False\n else:\n a = PolyShapes.Rectangle(self.resolX,self.resolY)\n a.shift(x1,y1)\n self.obsPoly -= a\n self.receiveObs = True\n #print \"del obstacle:\"+ str(x1) + \",\"+ str(y1)\n\n\n else:\n print \"Unexpected or corrupted data packet received.\"", "def Save2Nc(self):\r\n\r\n frameNumber = self.spinBox_FrameNum.value()\r\n\r\n segmentNumber = self.spinBox_SegmentNum.value()\r\n\r\n exposeTime = self.spinBox_ExpTime.value()\r\n width = self.spinBox_Width.value()\r\n xshift = self.spinBox_XShift.value()\r\n hight = self.spinBox_Hight.value()\r\n yshift = self.spinBox_Yshift.value()\r\n\r\n print(\"frameNumber, segmentNumber, width, high is: \", frameNumber, segmentNumber, width, hight)\r\n app = ReadData(noteObj = self.textBrowser_SetMeasureInf, frameNumber=frameNumber, segmentFrame=segmentNumber, width=width, hight=hight)\r\n self.multiFrameData = app.ImageData()\r\n\r\n options = QFileDialog.Options()\r\n options |= QFileDialog.DontUseNativeDialog\r\n # it just provides the name of file that you want to write into\r\n fileName, _= 
QFileDialog.getSaveFileName(self,\"QFileDialog.getSaveFileName()\",\"\",\"All Files (*);;NC Files (*.nc)\", options=options)\r\n \r\n if fileName:\r\n print(fileName)\r\n\r\n self.multiFrameData.to_netcdf(fileName + '.nc')\r\n self.textBrowser_SetMeasureInf.setTextColor(QtCore.Qt.green)\r\n self.textBrowser_SetMeasureInf.append(\"the data has saved as .nc file! \")", "def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()", "def test_noDuplicateCTCPDispatch(self):\n\n def testCTCP(user, channel, data):\n self.called += 1\n\n self.called = 0\n self.client.ctcpQuery_TESTTHIS = testCTCP\n\n self.client.irc_PRIVMSG(\n \"[email protected]\",\n [\"#chan\", \"{X}TESTTHIS{X}foo{X}TESTTHIS{X}\".format(X=irc.X_DELIM)],\n )\n self.assertEqualBufferValue(self.file.getvalue(), \"\")\n self.assertEqual(self.called, 1)", "def test_set_circulationdata_errors(self):\n provider = AlwaysSuccessfulCollectionCoverageProvider(\n self._default_collection\n )\n identifier = self._identifier()\n\n # No data.\n failure = provider._set_circulationdata(identifier, None)\n assert (\"Did not receive circulationdata from input source\" ==\n failure.exception)\n\n # No identifier in CirculationData.\n empty = CirculationData(provider.data_source, primary_identifier=None)\n failure = provider._set_circulationdata(identifier, empty)\n assert (\"Identifier did not match CirculationData's primary identifier.\" ==\n failure.exception)\n\n # Mismatched identifier in CirculationData.\n wrong = CirculationData(provider.data_source,\n primary_identifier=self._identifier())\n failure = provider._set_circulationdata(identifier, empty)\n assert (\"Identifier did not match CirculationData's primary identifier.\" ==\n failure.exception)\n\n # Here, the data is okay, but the ReplacementPolicy is\n # going to cause an error the first time we try to use it.\n correct = CirculationData(provider.data_source,\n identifier)\n provider.replacement_policy = object()\n failure = provider._set_circulationdata(identifier, correct)\n assert isinstance(failure, CoverageFailure)\n\n # Verify that the general error handling works whether or not\n # the provider is associated with a Collection.\n provider.collection_id = None\n failure = provider._set_circulationdata(identifier, correct)\n assert isinstance(failure, CoverageFailure)", "def cdf_to_nc(cdf_filename):\n\n # Load raw .cdf data\n ds = xr.load_dataset(cdf_filename)\n\n # create burst num variable\n ds = burst_num(ds)\n\n # Clip data to in/out water times or via good_ens\n ds = utils.clip_ds(ds)\n\n # Trim data when instrument is out of water during a deployment and extra bins if good_bins specified\n if \"bins\" in ds:\n ds = trim_alt(ds)\n else:\n ds = trim_alt(ds, data_vars=[\"Altitude_m\", \"AmplitudeFS\", \"Temperature_C\"])\n\n # calculate bin height for profiling echologger (i.e. 
type == ea)\n if \"bins\" in ds:\n ds = calc_bin_height(ds)\n\n # calculate corrected altitude (distance to bed/b_range) with adjusted sound speed\n ds = calc_cor_brange(ds)\n\n # calculate corrected bin height (on NAVD88 datum) with adjusted sound speed\n if \"bins\" in ds:\n ds = calc_cor_bin_height(ds)\n\n ds = calc_seabed_elev(ds)\n\n # if \"bins\" in ds:\n # ds = utils.create_z_bindist(ds) #create z for profile\n # else:\n ds = utils.create_z(ds)\n\n # swap bin dim with bin_height\n ds = ds_swap_dims(ds) # swap vert dim to z\n\n # rename variables\n ds = ds_rename_vars(ds)\n\n # drop some vars after renaming\n for k in [\n \"bins\",\n \"ping\",\n \"ping_num_in_series\",\n \"Altitude_m\",\n \"Battery_mV\",\n ]:\n if k in ds:\n ds = ds.drop_vars(k)\n\n # add lat/lons as coordinates\n ds = utils.ds_add_lat_lon(ds)\n\n # add attributes to each variable\n ds = ds_add_attrs(ds)\n\n # add metadata to global atts\n ds = utils.add_start_stop_time(ds)\n ds = utils.add_delta_t(ds)\n\n # Write to .nc file\n print(\"Writing cleaned/trimmed burst data and averaged burst data to .nc file\")\n nc_filename = ds.attrs[\"filename\"] + \"b-cal.nc\"\n\n ds.to_netcdf(\n nc_filename, unlimited_dims=[\"time\"], encoding={\"time\": {\"dtype\": \"i4\"}}\n )\n print(\"Done writing netCDF file\", nc_filename)\n\n # Average busrt and write to -a.nc file\n ds = average_burst(ds)\n\n for var in ds.data_vars:\n # do any diff trimming first\n ds = qaqc.trim_min_diff(ds, var)\n ds = qaqc.trim_min_diff_pct(ds, var)\n ds = qaqc.trim_max_diff(ds, var)\n ds = qaqc.trim_max_diff_pct(ds, var)\n ds = qaqc.trim_med_diff(ds, var)\n ds = qaqc.trim_med_diff_pct(ds, var)\n ds = qaqc.trim_maxabs_diff_2d(ds, var)\n ds = qaqc.trim_maxabs_diff(ds, var)\n # then do other trimming\n ds = qaqc.trim_bad_ens(ds, var)\n ds = qaqc.trim_min(ds, var)\n ds = qaqc.trim_max(ds, var)\n ds = aqdutils.trim_single_bins(ds, var)\n ds = qaqc.trim_fliers(ds, var)\n\n # after check for masking vars by others\n for var in ds.data_vars:\n ds = qaqc.trim_mask(ds, var)\n\n # assign min/max\n ds = utils.add_min_max(ds)\n\n ds = utils.ds_coord_no_fillvalue(ds)\n\n nc_filename = ds.attrs[\"filename\"] + \"-a.nc\"\n\n # ds['time']=ds['time'].astype('datetime64[s]')\n ds.to_netcdf(\n nc_filename, unlimited_dims=[\"time\"], encoding={\"time\": {\"dtype\": \"i4\"}}\n )\n\n utils.check_compliance(nc_filename, conventions=ds.attrs[\"Conventions\"])\n\n print(\"Done writing burst averaged netCDF file\", nc_filename)\n\n return ds", "def testCCHalt(self):\n cdl_convert.config.HALT_ON_ERROR = True\n\n def getCC():\n self.ccr_bad.cc\n\n self.assertRaises(\n ValueError,\n getCC\n )", "def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH 
+ \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source", "def createNCDF(self):\n\n rootgrp = Dataset(self.filename_out, 'w', format=self.format, clobber=True)\n\n # Create dimensions.\n if 'dimensions' in self.input_dict:\n for k, v in self.input_dict['dimensions'].items():\n rootgrp.createDimension(k, v)\n else:\n if not self.Quiet:\n print('No netCDF created:')\n print(' No dimension key found (!! has to be \\\"dimensions\\\"!!!)')\n return()\n\n # Create global attributes.\n if 'global attributes' in self.input_dict:\n for k, v in self.input_dict['global attributes'].items():\n rootgrp.setncattr(k, v)\n else:\n if not self.Quiet:\n print(' No global attribute key found (!! has to be \\\"global attributes\\\"!!!)')\n\n # Create variables.\n for k, v in self.input_dict['variables'].items():\n dims = self.input_dict['variables'][k]['dimensions']\n data = v['data']\n # Create correct data type if provided\n if 'data_type' in self.input_dict['variables'][k]:\n data_type = self.input_dict['variables'][k]['data_type']\n else:\n data_type = 'f4'\n # Check whether we've been given a fill value.\n if 'fill_value' in self.input_dict['variables'][k]:\n fill_value = self.input_dict['variables'][k]['fill_value']\n else:\n fill_value = None\n # Create ncdf variable\n if not self.Quiet:\n print(' Creating variable: {} {} {}'.format(k, data_type, dims))\n var = rootgrp.createVariable(k, data_type, dims, fill_value=fill_value)\n if len(dims) > np.ndim(data):\n # If number of dimensions given to netCDF is greater than the\n # number of dimension of the data, then fill the netCDF\n # variable accordingly.\n if 'time' in dims:\n # Check for presence of time dimension (which can be\n # unlimited variable: defined by None).\n try:\n var[:] = data\n except IndexError:\n raise(IndexError(('Supplied data shape {} does not match the specified'\n ' dimensions {}, for variable \\'{}\\'.'.format(data.shape, var.shape, k))))\n else:\n if not self.Quiet:\n print('Problem in the number of dimensions')\n else:\n try:\n var[:] = data\n except IndexError:\n raise(IndexError(('Supplied data shape {} does not match the specified'\n ' dimensions {}, for variable \\'{}\\'.'.format(data.shape, var.shape, k))))\n\n # Create attributes for variables\n if 'attributes' in self.input_dict['variables'][k]:\n for ka, va in self.input_dict['variables'][k]['attributes'].items():\n var.setncattr(ka, va)\n\n rootgrp.close()", "def test_save_as_netcdf(self):\n target_dims = self.target.dimensions\n expected_target_dims = [(u'MMAXZ', 4), \n (u'NMAXZ', 4), \n (u'MMAX', 4), \n (u'NMAX', 4), \n (u'KMAX', 2), \n (u'KMAX1', 3), \n (u'time', 2)\n ]\n target_vars = self.target.variables\n expected_target_vars = [u'XZ',\n u'YZ',\n u'XCOR',\n u'YCOR',\n u'grid',\n u'U1',\n u'FAKE_U1',\n u'V1',\n u'W',\n u'FAKE_W',\n u'time',\n u'latitude',\n u'longitude',\n u'grid_latitude',\n u'grid_longitude'\n ]\n target_grid_vars = self.target.grid_variables\n expected_target_grid_vars = [u'U1',\n u'FAKE_U1',\n u'V1',\n u'W',\n u'FAKE_W'\n ]\n target_face_coordinates = self.target.face_coordinates\n 
expected_target_face_coordinates = (u'XZ', u'YZ')\n self.assertIsInstance(self.target, SGrid2D)\n self.assertEqual(len(target_dims), len(expected_target_dims))\n self.assertEqual(set(target_dims), set(expected_target_dims))\n self.assertEqual(len(target_vars), len(expected_target_vars))\n self.assertEqual(set(target_vars), set(expected_target_vars))\n self.assertEqual(len(target_grid_vars), len(expected_target_grid_vars))\n self.assertEqual(set(target_grid_vars), set(expected_target_grid_vars))\n self.assertEqual(target_face_coordinates, expected_target_face_coordinates)", "def prepare_val_coco_data(args):\n image_dir, annotation_file = args.val_coco_image_dir, args.val_coco_annotation_file\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, coco.imgs[img_id]['file_name']))\n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n\n print(\"Building the validation dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return coco, dataset", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? 
\n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? \n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def createncfile(dz_id,t,x,z):\n db = labdb.LabDB()\n #create the directory in which to store the nc file\n sql = \"\"\"INSERT into dn2t (dz_id) VALUES (%d)\"\"\" % (dz_id) \n db.execute(sql)\n sql = \"\"\"SELECT LAST_INSERT_ID()\"\"\" \n rows = db.execute(sql)\n dn2t_id = rows[0][0]\n dn2t_path = \"/Volumes/HD4/dn2t/%d\" % dn2t_id \n os.mkdir(dn2t_path)\n\n dn2t_filename = os.path.join(dn2t_path,\"dn2t.nc\")\n print(\"d(N2)/dt filename : \",dn2t_filename)\n\n\n # Declare the nc file for the first time\n nc = netCDF4.Dataset(dn2t_filename,'w',format = 'NETCDF4')\n row_dim = nc.createDimension('row',964)\n col_dim = 
nc.createDimension('column',1292)\n lenT=t.shape[0] #lenT is the length of the dn2t file.Its 1 element shorter in time axis than deltaN2\n print(\"time axis length\",lenT) # debug info\n t_dim = nc.createDimension('time',lenT)\n\n # Dimensions are also variable\n ROW = nc.createVariable('row',numpy.float32,('row'))\n print(list(nc.dimensions.keys()), ROW.shape,ROW.dtype)\n COLUMN = nc.createVariable('column',numpy.float32,('column'))\n print(list(nc.dimensions.keys()) , COLUMN.shape, COLUMN.dtype)\n TIME = nc.createVariable('time',numpy.float32,('time'))\n print(list(nc.dimensions.keys()) ,TIME.shape, TIME.dtype)\n\n # declare the 3D data variable \n dn2t = nc.createVariable('dn2t_array',numpy.float32,('time','row','column'))\n print(list(nc.dimensions.keys()) ,dn2t.shape,dn2t.dtype)\n\n # assign the values\n TIME[:] = t\n ROW[:] = z\n COLUMN[:] = x\n\n nc.close()\n db.commit()\n return dn2t_id,dn2t_filename", "def decomptcperrlessdata(self) :\n\t\ttry :\n\t\t\treturn self._decomptcperrlessdata\n\t\texcept Exception as e:\n\t\t\traise e", "def create_netcdf(self):\n\n # NetCDF file and global attributes\n file_name = self.output_dir / f\"{author.replace(' ', '_').lower()}_{self.sos_file.split('_')[0]}.nc\"\n ds = Dataset(file_name, 'w')\n ds.author = self.author\n ds.contact = self.email\n ds.sos_file = self.sos_file\n ds.production_date = datetime.now().strftime('%d-%b-%Y %H:%M:%S')\n\n # Groups\n for source in self.priors_dict.keys():\n # Check if the group has data\n if self.priors_dict[source]:\n # Create groups for each prior and populate with data\n for prior, data in self.priors_dict[source].items():\n # Group\n g = ds.createGroup(f\"{source}_{prior}\")\n\n # Attribute\n g.run_type = data[\"run_type\"]\n\n # Dimensions\n if \"reach_ids\" in data.keys(): \n self.create_dimensions(source, g, num_reaches=len(data[\"reach_ids\"]))\n else:\n self.create_dimensions(source, g, num_nodes=len(data[\"node_ids\"]))\n\n # Variables\n self.create_variables(source, prior, data, g)\n\n # Close dataset file\n ds.close()", "def _check_data(data):\n if not (data.dtype == _np.float32 and data.flags.c_contiguous):\n raise ValueError('supplied data must be float32 and C contiguous')\n if data.ndim == 2:\n num_frames, channels = data.shape\n elif data.ndim == 1:\n num_frames, channels = data.size, 1\n else:\n raise ValueError('rank > 2 not supported')\n return num_frames, channels", "def to_netcdf(self, outfile):", "def test_cl_fix_data(cl_cube):\n fix = Cl(None)\n out_cube = fix.fix_data(cl_cube)\n assert out_cube.data == [100.0]", "def test_simple(self):\n with self.subTest(\"from np array\"):\n data = np.random.rand(10, 4)\n mask = np.ones((10, 4))\n channels = [\"a\", \"b\", \"c\", \"d\"]\n fcs.FCSData((data, mask), channels=channels)", "def data_fetch_netcdf(self):\n self.client = boto3.client('s3', aws_access_key_id=self.creds_data['key_id'],\n aws_secret_access_key=self.creds_data['key_access'])\n year = self.month_year[0]\n month = self.month_year[1]\n # change output folder to desired location from TRMM website\n # folder structure to partitioned the data year_month\n output_temp = self.output_folder + year + '_' + month\n url_data = \"http://trmm.atmos.washington.edu/{}interp_data/{}/{}\".format(self.output_folder, year, month)\n print(url_data)\n start_time_year_month = time.time()\n r = requests.get(url_data, auth=self.auth_data)\n # check if url exists then extract netcdf links to download and upload to s3.\n if r.status_code == 200:\n soup = BeautifulSoup(r.text, features='lxml')\n for link 
in soup.findAll('a'):\n link_url = link.get('href')\n write_path = os.path.join(output_temp, link_url)\n if link_url.endswith('.nc4'):\n file_url = url_data + '/' + link_url\n r = requests.get(file_url, auth=self.auth_data, stream=True)\n if r.status_code == 200:\n self.client.put_object(Body=r.content, Bucket='himatdata', Key='Trmm/' + write_path)\n logging.info(\"Done with Year Month: %s\", month_year)\n print(\"--- %s seconds ---\" % (time.time() - start_time_year_month))\n\n else:\n print('No data/authentication for'.format(month_year))", "def test_cl_fix_file(mock_get_filepath, cl_file, tmp_path):\n mock_get_filepath.return_value = os.path.join(tmp_path,\n 'fixed_cesm2_waccm_cl.nc')\n fix = Cl(None)\n fixed_file = fix.fix_file(cl_file, tmp_path)\n mock_get_filepath.assert_called_once_with(tmp_path, cl_file)\n fixed_dataset = Dataset(fixed_file, mode='r')\n assert fixed_dataset.variables['lev'].standard_name == (\n 'atmosphere_hybrid_sigma_pressure_coordinate')\n assert fixed_dataset.variables['lev'].formula_terms == (\n 'p0: p0 a: a b: b ps: ps')\n assert fixed_dataset.variables['lev'].units == '1'\n np.testing.assert_allclose(fixed_dataset.variables['a'][:], [1.0, 2.0])\n np.testing.assert_allclose(fixed_dataset.variables['b'][:], [0.0, 1.0])\n np.testing.assert_allclose(fixed_dataset.variables['a_bnds'][:],\n [[0.0, 1.5], [1.5, 3.0]])\n np.testing.assert_allclose(fixed_dataset.variables['b_bnds'][:],\n [[-1.0, 0.5], [0.5, 2.0]])", "def test_recv(self):\n # Required to get useful test names\n super(TestCisPandasInput_local, self).test_recv()", "def prepare_test_data():\n # Dictionary in which to store data\n data_dict = {}\n # Load data01.nc Dataset\n data01 = xr.open_dataset(os.path.dirname(__file__)+'/data/data01.nc',\n decode_times=False, autoclose=True)\n data_dict['data01'] = data01.copy()\n # Extract two *DataArrays* - to test functions with DataArrays\n da_ts = data01['TS'].copy()\n da_precl = data01['PRECL'].copy()\n data_dict['da_ts'] = da_ts.copy()\n data_dict['da_precl'] = da_precl.copy()\n # Dataset with *shifted* longitudes\n ds_shift_lon = climapy.xr_shift_lon(data01.copy())\n data_dict['ds_shift_lon'] = ds_shift_lon.copy()\n # Datasets with *reversed* lon/lat coordinates and data\n ds_rev_lon = data01.copy()\n ds_rev_lon['lon'].values = ds_rev_lon['lon'].values[::-1]\n for var_name in ['TS', 'PRECL']: # array order: time, lat, lon\n ds_rev_lon[var_name].values = ds_rev_lon[var_name].values[:, :, ::-1]\n ds_rev_lat = data01.copy()\n ds_rev_lat['lat'].values = ds_rev_lat['lat'].values[::-1]\n for var_name in ['TS', 'PRECL']:\n ds_rev_lat[var_name].values = ds_rev_lat[var_name].values[:, ::-1, :]\n ds_rev_both = data01.copy()\n ds_rev_both['lat'].values = ds_rev_both['lat'].values[::-1]\n ds_rev_both['lon'].values = ds_rev_both['lon'].values[::-1]\n for var_name in ['TS', 'PRECL']:\n ds_rev_both[var_name].values = ds_rev_both[var_name].values[:, ::-1, ::-1]\n data_dict['ds_rev_lon'] = ds_rev_lon.copy()\n data_dict['ds_rev_lat'] = ds_rev_lat.copy()\n data_dict['ds_rev_both'] = ds_rev_both.copy()\n # Dataset with *transposed* lon/lat coords\n ds_transposed = data01.copy()\n ds_transposed = ds_transposed.transpose()\n data_dict['ds_transposed'] = ds_transposed.copy()\n # Dataset with *renamed* longitude and latitude coords\n ds_renamed = data01.copy()\n ds_renamed = ds_renamed.rename({'lon': 'longitude', 'lat': 'latitude'})\n data_dict['ds_renamed'] = ds_renamed.copy()\n # Datasets with slightly *irregular* lon/lat coords, yet still monotonic\n nx, ny = data01['lon'].size, 
data01['lat'].size\n lon_irr = (data01['lon'].values +\n np_rand.uniform(low=-0.5, high=0.5, size=nx)) # add small amount of noise\n lon_irr[[0, -1]] = data01['lon'].values[[0, -1]] # keep end values unchanged\n lat_irr = (data01['lat'].values +\n np_rand.uniform(low=-0.5, high=0.5, size=ny))\n lat_irr[[0, -1]] = data01['lat'].values[[0, -1]]\n ds_irr_lon = data01.copy()\n ds_irr_lon['lon'].values = lon_irr.copy()\n ds_irr_lat = data01.copy()\n ds_irr_lat['lat'].values = lat_irr.copy()\n ds_irr_both = data01.copy()\n ds_irr_both['lon'].values = lon_irr.copy()\n ds_irr_both['lat'].values = lat_irr.copy()\n data_dict['ds_irr_lon'] = ds_irr_lon.copy()\n data_dict['ds_irr_lat'] = ds_irr_lat.copy()\n data_dict['ds_irr_both'] = ds_irr_both.copy()\n # Dataset with *strange* lon/lat coords - very irregular and not monotonic\n lon_strange = (data01['lon'].values +\n np_rand.uniform(low=-10, high=10, size=nx)) # add large amount of noise\n lon_strange[[0, -1]] = data01['lon'].values[[0, -1]] # keep end values unchanged\n lat_strange = (data01['lat'].values + np_rand.uniform(low=-10, high=10, size=ny))\n lat_strange[[0, -1]] = data01['lat'].values[[0, -1]] # keep end values unchanged\n ds_strange = data01.copy()\n ds_strange['lon'].values = lon_strange.copy()\n ds_strange['lat'].values = lat_strange.copy()\n data_dict['ds_strange'] = ds_strange.copy()\n # Return dictionary of data\n return data_dict", "def prep(self, deleteraw=False):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n if deleteraw:\n del self.rawdata\n del self.flags\n\n self.freq = self.freq_orig[self.chans]\n\n # set up ur tracks (lol)\n self.dmtrack0 = {}\n self.twidths = {}\n self.delay = {}\n for dmbin in xrange(len(self.dmarr)):\n self.dmtrack0[dmbin] = self.dmtrack(self.dmarr[dmbin],0) # track crosses high-freq channel in first integration\n (trackt, trackc) = self.dmtrack0[dmbin]\n if len(trackc)<len(self.chans):\n print 'Computed track for DM=%.1f is too long for the observation; only %d channels are computed' % (self.dmarr[dmbin],len(trackc))\n continue\n \n# old way\n# self.twidths[dmbin] = [len(n.where(trackc == (chan-self.chans[0]))[0]) for chan in self.chans] # width of track for each unflagged channel\n# self.delay[dmbin] = [n.int(trackt[n.where(trackc == (chan-self.chans[0]))[0][0]]) for chan in self.chans] # integration delay for each unflagged channel of a given dm.\n# new way\n\n self.twidths[dmbin] = [len(n.where(n.array(trackc) == chan)[0]) for chan in range(len(self.chans))] # width of track for each unflagged channel\n self.delay[dmbin] = [n.int(trackt[n.where(n.array(trackc) == chan)[0][0]]) for chan in range(len(self.chans))] # integration delay for each unflagged channel of a given dm.\n\n\n print 'Track width in time: '\n for dmbin in self.twidths:\n print 'DM=%.1f, max(twidth)=%d. Iteration could step by %d/2.' 
% (self.dmarr[dmbin], max(self.twidths[dmbin]), max(self.twidths[dmbin]))", "def has_data(self):\n if len(self.channels) > 0:\n return True\n return False", "def setUp(self):\n\n serial_times = {295: '1971-07-31T01:24:11.754',\n 296: '1971-07-31T01:24:36.970',\n 297: '1971-07-31T01:25:02.243',\n 298: '1971-07-31T01:25:27.457',\n 299: '1971-07-31T01:25:52.669',\n 300: '1971-07-31T01:26:17.923'}\n self.serials = ['APOLLO15/METRIC/{}'.format(i) for i in serial_times.values()]\n\n\n x = list(range(5))\n y = list(range(5))\n pid = [0,0,1,1,1]\n idx = pid\n serials = [self.serials[0], self.serials[1], self.serials[2],\n self.serials[2], self.serials[3]]\n\n\n columns = ['x', 'y', 'idx', 'pid', 'nid']\n self.data_length = 5\n\n data = [x,y, idx, pid, serials]\n\n self.creation_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n cnet = C(data, index=columns).T\n\n io_controlnetwork.to_isis('test.net', cnet, mode='wb', targetname='Moon')\n\n self.header_message_size = 85\n self.point_start_byte = 65621", "def do_grid_check(self,):\n self.ydim, self.xdim = self.data_fcst.shape \n if self.data_obs.shape != (self.ydim,self.xdim):\n raise FormatError(\"Obs and forecast data not same size.\")\n return", "def __init__(self, out_dir = 'output' ):\n \n self.data = {} # will contain the data for each different dataset \n self.datasets = '' # will contain the input datasets (original dictionary)\n self.datasets_keys = '' # will contain the input datasets names only (i.e. keys of the datasets dictionary)\n self.datasets_all = ['igra2' , 'era5_1' , 'ncar_w' , 'ncar_t', 'bufr' , 'era5_1759' , 'era5_1761' , 'era5_3188'] # all possibly available datasets \n #self.observation_ids_merged = { 'igra2':1 , 'ncar_t':2 , 'ncar_w':2, 'bufr':3, 'era5_1':4 , 'era5_1759' :5 , 'era5_1761':6 , 'era5_3188' :7} # values used to convert original record_id to the merged record_id, see method merge_all_data \n \n self.observation_ids_merged = { 'igra2':1 , 'ncar':2, 'bufr':3, 'era5_1':4 , 'era5_1759' :5 , 'era5_1761':6 , 'era5_3188' :7} # values used to convert original record_id to the merged record_id, see method merge_all_data \n \n self.unique_dates = {} \n self.attributes = {} # will keep the original attributes from the CDM tables, read from the netCDF files \n self.out_dir = out_dir", "def MyDataChangedCallback(self, inRefcon):\r\n pass", "def output_netcdf(forecast,proj_dict,grid_dict,start_hour,end_hour,\n stride,size,run_date,target_dataset,smoothing,config):\n for d,date in enumerate(run_date):\n date_outpath = config.forecast_out_path+'20{0}/netcdf/'.format(\n date)\n \n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n \n map_data = make_proj_grids(proj_dict,grid_dict)\n lons = map_data[\"lon\"]\n lats = map_data[\"lat\"]\n \n filtered_forecast = gaussian_filter(forecast[d],smoothing,mode='constant')\n \n filename = date_outpath + \"{0}_{6}_Hail_{1}_Cali_NMEP_{2}mm_{3}_Hours_{4}-{5}.nc\".format(\n config.ensemble_name,\n target_dataset,\n size,\n date,\n start_hour,end_hour,config.forecast_model_names)\n\n \n out_file = Dataset(filename, \"w\")\n out_file.createDimension(\"x\", filtered_forecast.shape[0])\n out_file.createDimension(\"y\", filtered_forecast.shape[1])\n out_file.createVariable(\"Longitude\", \"f4\", (\"x\", \"y\"))\n out_file.createVariable(\"Latitude\", \"f4\",(\"x\", \"y\"))\n out_file.createVariable(\"Data\", \"f4\", (\"x\", \"y\"))\n out_file.variables[\"Longitude\"][:,:] = lons\n out_file.variables[\"Latitude\"][:,:] = lats\n out_file.variables[\"Data\"][:,:] = filtered_forecast\n 
out_file.projection = proj_dict[\"proj\"]\n out_file.lon_0 = proj_dict[\"lon_0\"]\n out_file.lat_0 = proj_dict[\"lat_0\"]\n out_file.lat_1 = proj_dict[\"lat_1\"]\n out_file.lat_2 = proj_dict[\"lat_2\"]\n out_file.close()\n \n print(\"Writing to \" + filename)\n return", "def _handleRequestCableCheckParameters(self, data):\r\n print(\"\\\"Request Cable Check Parameters\\\" received\")\r\n message = self.whitebeet.v2gParseRequestCableCheckParameters(data)\r\n if 'dc' in message:\r\n print(\"SOC: {}%\".format(message['dc']['soc']))\r\n try:\r\n self.whitebeet.v2gSetDcCableCheckParameters(0, 1)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))", "def read(self, data=''):\n if not self.nc.io.closed():\n self.nc.io.read_bytes(MAX_CONTROL_LINE_SIZE, callback=self.read, streaming_callback=self.parse, partial=True)", "def create_netcdf(self, netcdf_file, cols, rows, doy_range, do_zlib=True):\n\n # least_significant_digit for createVariable(...)\n lsd=None\n\n # create a new netCDF file\n rootgrp = nc.Dataset(netcdf_file, \"w\")\n rootgrp.description = \"NBAR reflectance and BB albedo\"\n\n # Create groups for NBAR reflectance and BB albedo\n refl_grp = rootgrp.createGroup(\"reflectance\")\n refl_grp.description = \"Normalized at nadir (NBAR) reflectance\"\n albedo_grp = rootgrp.createGroup(\"albedo\")\n albedo_grp.description = \"Broad band (BB) albedo\"\n\n # Create dimensions for reflectance data. I.e. time series of reflectance\n # are determined by x, y and a day since 2000\n x = rootgrp.createDimension(\"x\", cols)\n y = rootgrp.createDimension(\"y\", rows)\n day = rootgrp.createDimension(\"day\", doy_range.shape[0])\n band = rootgrp.createDimension(\"band\", 7)\n str_dim = rootgrp.createDimension(\"str_dim\", 10)\n\n # We can set zlib=True for compression as an argument\n # of the createVariable function\n date_str = rootgrp.createVariable(\"date_str\", \"S1\", (\"day\", \"str_dim\"), zlib=do_zlib, least_significant_digit=lsd)\n date_str.units = \"string representation of date: yyyy.mm.dd\"\n # date = rootgrp.createVariable(\"julday\", \"i\")\n date = rootgrp.createVariable(\"julday\", \"i\", \"day\", zlib=do_zlib, least_significant_digit=lsd)\n date.units = \"Julian day\"\n\n # Create variables of NBAR reflectance: 7 MODIS bands\n for i in range(1, 8):\n refl = refl_grp.createVariable(\"refl_b%d\" % i, \"f4\", (\"day\", \"x\", \"y\"), zlib=do_zlib,\\\n least_significant_digit=lsd)\n refl.units = \"surface bidirectional reflectance, band %d\" % i\n\n # reflectance uncertainty\n refl_sd = refl_grp.createVariable(\"refl_b%d_sd\" % i, \"f4\", (\"day\", \"x\", \"y\"), zlib=do_zlib,\\\n least_significant_digit=lsd)\n refl_sd.units = \"uncertainty of surface bidirectional reflectance, band %d\" % i\n \n y_fwd = refl_grp.createVariable(\"y_fwd\", \"f4\", (\"day\", \"band\", \"x\", \"y\"), zlib=do_zlib,\\\n least_significant_digit=lsd)\n\n y_orig = refl_grp.createVariable(\"y_orig\", \"f4\", (\"day\", \"band\", \"x\", \"y\"), zlib=do_zlib, \\\n least_significant_digit=lsd)\n \n albedo_vis = albedo_grp.createVariable(\"albedo_vis\", \"f4\", (\"day\", \"x\", \"y\"), zlib=do_zlib,\\\n least_significant_digit=lsd)\n albedo_vis.units = \"broad band albedo\"\n albedo_nir = albedo_grp.createVariable(\"albedo_nir\", \"f4\", ( \"day\", \"x\", \"y\"), zlib=do_zlib,\\\n least_significant_digit=lsd)\n albedo_nir.units = \"broad band albedo\"\n albedo_swir = albedo_grp.createVariable(\"albedo_swir\", \"f4\", (\"day\", \"x\", 
\"y\"), zlib=do_zlib,\\\n least_significant_digit=lsd)\n albedo_swir.units = \"broad band albedo\"\n\n # albedo uncertainty\n albedo_vis_sd = albedo_grp.createVariable(\"albedo_vis_sd\", \"f4\", (\"day\", \"x\", \"y\"), zlib=do_zlib,\\\n least_significant_digit=lsd)\n albedo_vis_sd.units = \"albedo standard deviation\"\n albedo_nir_sd = albedo_grp.createVariable(\"albedo_nir_sd\", \"f4\", (\"day\", \"x\", \"y\"), zlib=do_zlib,\\\n least_significant_digit=lsd)\n albedo_nir_sd.units = \"albedo standard deviation\"\n albedo_swir_sd = albedo_grp.createVariable(\"albedo_swir_sd\", \"f4\", (\"day\", \"x\", \"y\"), zlib=do_zlib,\\\n least_significant_digit=lsd)\n albedo_swir_sd.units = \"albedo standard deviation\"\n\n # latitude and longitude arrays\n lat = rootgrp.createVariable('lat', \"f4\", (\"x\", \"y\"), zlib=do_zlib)\n lat.units = \"latitude\"\n lon = rootgrp.createVariable('lon', \"f4\", (\"x\", \"y\"), zlib=do_zlib)\n lon.units = \"longitude\"\n\n # save geo information and projection\n # nchar_proj = len(proj_str)\n # nchar_geo = len(geo_str)\n rootgrp.createDimension('nchar_proj', 400)\n rootgrp.createDimension('nchar_geo', 100)\n rootgrp.createVariable('proj', 'S1', ('nchar_proj'))\n rootgrp.createVariable('geo_transform', 'S1', 'nchar_geo')\n\n # proj_char = nc.stringtochar(np.array([proj_str], 'S%d' % nchar_proj))\n # geo_char = nc.stringtochar(np.array([geo_str], 'S%d' % nchar_geo))\n # rootgrp.variables['proj'][:] = proj_char\n # rootgrp.variables['geo_transform'][:] = geo_char\n\n return rootgrp", "def dataIdentify(self, in_nc):\r\n data_nc = NET.Dataset(in_nc)\r\n time = data_nc.variables['time'][:]\r\n diff = NUM.unique(NUM.diff(time))\r\n data_nc.close()\r\n #time_interval_highres = NUM.array([1.0,3.0,6.0],dtype=float)\r\n #time_interval_lowres_full = NUM.array([3.0, 6.0],dtype=float)\r\n #time_interval_lowres = NUM.array([6.0],dtype=float)\r\n #time_interval_lowres_3Hr = NUM.array([3.0],dtype=float)\r\n\t\t\r\n time_interval_HRES1 = NUM.array([1.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES13 = NUM.array([1.0,3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES136 = NUM.array([1.0,3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS3 = NUM.array([3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS36 = NUM.array([3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS6 = NUM.array([6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n\r\n\r\n #print \"SDR - diff:\", diff, time_interval_highres, time_interval_lowres_full, time_interval_lowres\r\n #if NUM.array_equal(diff, time_interval_highres):\r\n # return \"HighRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_full):\r\n # return \"LowResFull\"\r\n #elif NUM.array_equal(diff, time_interval_lowres):\r\n # return \"LowRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_3Hr):\r\n # return \"Low3HrRes\"\r\n #else:\r\n # return None\r\n\t\t\t\r\n if NUM.array_equal(diff, time_interval_HRES1): # Line Added/Modified CJB 20190108\r\n return \"HRES1\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES13): # Line Added/Modified CJB 20190108\r\n return \"HRES13\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES136): # Line Added/Modified CJB 20190108\r\n return \"HRES136\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS3): # Line Added/Modified CJB 20190108\r\n return \"ENS3\" # Line 
Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS36): # Line Added/Modified CJB 20190108\r\n return \"ENS36\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS6): # Line Added/Modified MJS, CJB 20190108\r\n return \"ENS6\" # Line Added/Modified CJB 20190108\r\n else: # Line Added/Modified CJB 20190108\r\n return None # Line Added/Modified CJB 20190108\r", "def writeNetCDFData(out_nc, hrus, dr_time, hru_type, remapped_data, var_meta, var_attrs, var_encodings, remap_idx):\n\n dataset = xr.Dataset()\n\n for varname, meta in var_meta.items():\n foo = xr.DataArray(remapped_data[varname][:, remap_idx],\n dims=['time', 'basinID'],\n name=varname)\n\n foo.encoding = var_encodings[varname]\n foo.attrs = var_attrs[varname]\n\n dataset[varname] = foo\n\n # HRU ID variables\n dataset['basinID'] = xr.DataArray(hrus[remap_idx], dims=['basinID'])\n dataset['basinID'].encoding = {'dtype': hru_type, '_FillValue': None}\n dataset['basinID'].attrs = {'long_name': 'Basin ID'}\n\n dataset[TIME_DIM_NAME] = dr_time\n\n dataset.to_netcdf(out_nc, unlimited_dims='time')", "def write_netcdf(file, lons, lats, times, hydrographs, fractions, loc, grid_id,\n inds, Flist, velocity, diffusion, fill_value, verbose):\n f = Dataset(file,'w', format='NETCDF4')\n\n # set dimensions\n time = f.createDimension('time', None)\n lon = f.createDimension('lon', (len(lons)))\n lat = f.createDimension('lat', (len(lats)))\n\n # initialize variables\n time = f.createVariable('time','f8',('time',))\n lon = f.createVariable('lon','f8',('lon',))\n lat = f.createVariable('lat','f8',('lat',))\n fraction = f.createVariable('fraction','f8',('lat','lon',),fill_value=fill_value)\n UHS = f.createVariable('unit_hydrograph','f8',('time','lat','lon',),fill_value=fill_value)\n\n # write attributes for netcdf\n f.description = 'Aggregated UH_S and Fraction Vars'\n f.history = 'Created: {}\\n'.format(tm.ctime(tm.time()))\n f.history += ' '.join(sys.argv) + '\\n'\n f.source = sys.argv[0] # prints the name of script used\n f.velocity = velocity\n f.diffusion = diffusion\n f.outlet_id = str(grid_id.astype(np.int64))\n f.outlet_y= str(inds[0].astype(np.int64))\n f.outlet_x = str(inds[1].astype(np.int64)) # this is change is a cdo work around. Othewise cdo removes the attribute. 
\n f.outlet_lat = loc[0]\n f.outlet_lon = loc[1]\n f.includes = ', '.join(Flist)\n\n lat.long_name = 'latitude coordinate'\n lat.standard_name = 'latitude'\n lat.units = 'degrees_north'\n\n lon.long_name = 'longitude coordinate'\n lon.standard_name = 'longitude'\n lon.units = 'degrees_east'\n\n time.units = 'seconds since 0001-1-1 0:0:0'\n time.calendar = 'noleap'\n time.longname = 'time'\n time.type_prefered = 'float'\n time.description = 'Seconds since initial impulse'\n\n UHS.units = 'unitless'\n UHS.description = 'unit hydrograph for each grid cell with respect to downstream grid location'\n \n fraction.units = 'unitless'\n fraction.description = 'fraction of grid cell contributing to guage location'\n\n # write data to variables initialized above\n time[:]= times\n lon[:] = lons\n lat[:] = lats\n UHS[:,:,:] = hydrographs\n fraction[:,:]= fractions\n f.close()", "def data_cb(self, handle, value):\n \n self.x, self.y, self.z, self.temp = unpack('fffB', value)\n print(f\"xyz: {self.x}, {self.y}, {self.z}\")\n self.send()", "def data_sample(CI, CC, thermal, calibration, health, driver_monitor, gps_location,\n poller, cal_status, cal_perc, overtemp, free_space, low_battery,\n driver_status, geofence, state, mismatch_counter, params):\n\n # Update carstate from CAN and create events\n CS = CI.update(CC)\n events = list(CS.events)\n enabled = isEnabled(state)\n\n # Receive from sockets\n td = None\n cal = None\n hh = None\n dm = None\n gps = None\n\n for socket, event in poller.poll(0):\n if socket is thermal:\n td = messaging.recv_one(socket)\n elif socket is calibration:\n cal = messaging.recv_one(socket)\n elif socket is health:\n hh = messaging.recv_one(socket)\n elif socket is driver_monitor:\n dm = messaging.recv_one(socket)\n elif socket is gps_location:\n gps = messaging.recv_one(socket)\n\n if td is not None:\n overtemp = td.thermal.thermalStatus >= ThermalStatus.red\n free_space = td.thermal.freeSpace < 0.15 # under 15% of space free no enable allowed\n low_battery = td.thermal.batteryPercent < 1 # at zero percent battery, OP should not be allowed\n\n # Create events for battery, temperature and disk space\n if low_battery:\n events.append(create_event('lowBattery', [ET.NO_ENTRY, ET.SOFT_DISABLE]))\n if overtemp:\n events.append(create_event('overheat', [ET.NO_ENTRY, ET.SOFT_DISABLE]))\n if free_space:\n events.append(create_event('outOfSpace', [ET.NO_ENTRY]))\n\n # Handle calibration\n if cal is not None:\n cal_status = cal.liveCalibration.calStatus\n cal_perc = cal.liveCalibration.calPerc\n\n if cal_status != Calibration.CALIBRATED:\n if cal_status == Calibration.UNCALIBRATED:\n events.append(create_event('calibrationIncomplete', [ET.NO_ENTRY, ET.SOFT_DISABLE, ET.PERMANENT]))\n else:\n events.append(create_event('calibrationInvalid', [ET.NO_ENTRY, ET.SOFT_DISABLE]))\n\n # When the panda and controlsd do not agree on controls_allowed\n # we want to disengage openpilot. 
However the status from the panda goes through\n # another socket than the CAN messages, therefore one can arrive earlier than the other.\n # Therefore we allow a mismatch for two samples, then we trigger the disengagement.\n if not enabled:\n mismatch_counter = 0\n\n if hh is not None:\n controls_allowed = hh.health.controlsAllowed\n if not controls_allowed and enabled:\n mismatch_counter += 1\n if mismatch_counter >= 2:\n events.append(create_event('controlsMismatch', [ET.IMMEDIATE_DISABLE]))\n\n # Driver monitoring\n if dm is not None:\n driver_status.get_pose(dm.driverMonitoring, params)\n\n # Geofence\n if geofence is not None and gps is not None:\n geofence.update_geofence_status(gps.gpsLocationExternal, params)\n if geofence is not None and not geofence.in_geofence:\n events.append(create_event('geofence', [ET.NO_ENTRY, ET.WARNING]))\n\n return CS, events, cal_status, cal_perc, overtemp, free_space, low_battery, mismatch_counter", "def __log_data_handler(self, event, sender, data):\n pos_x = -data.mvo.pos_x\n pos_y = -data.mvo.pos_y\n pos_z = -data.mvo.pos_z\n # First time we have meaningful values, we store them as reference\n if abs(pos_x) + abs(pos_y) + abs(pos_z) > 0.07:\n if self.ref_pos_x == -1:\n self.ref_pos_x = pos_x\n self.ref_pos_y = pos_y\n self.ref_pos_z = pos_z\n else:\n self.pos_x = pos_x - self.ref_pos_x\n self.pos_y = pos_y - self.ref_pos_y\n self.pos_z = pos_z - self.ref_pos_z\n\n qx = data.imu.q1\n qy = data.imu.q2\n qz = data.imu.q3\n qw = data.imu.q0\n\n degree = 0.01745\n siny = 2 * (qw * qz + qx * qy)\n cosy = 1 - 2 * (qy * qy + qz * qz)\n self.yaw = int(atan2(siny, cosy) / degree)\n\n if self.write_header:\n self.log_file.write(f\"{data.format_cvs_header()}\\n\")\n self.write_header = False\n self.log_file.write(f\"{data.format_cvs()}\\n\")", "def _handle_MonitorData (self, event, packet, reverse):\n pass", "def XPLMDataChanged_f(inRefcon):", "def hmVerifyMsgCRCOK(destination, protocol, source, expectedFunction, expectedLength, datal) :\r\n badresponse = 0\r\n if protocol == constants.HMV3_ID:\r\n checksum = datal[len(datal)-2:]\r\n rxmsg = datal[:len(datal)-2]\r\n crc = crc16() # Initialises the CRC\r\n expectedchecksum = crc.run(rxmsg)\r\n if expectedchecksum == checksum:\r\n print(\"CRC is correct\")\r\n else:\r\n print(\"CRC is INCORRECT\")\r\n s = \"Incorrect CRC: %s Expected: %s \\n\" % (datal, expectedchecksum)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n # Check the response\r\n dest_addr = datal[0]\r\n frame_len_l = datal[1]\r\n frame_len_h = datal[2]\r\n frame_len = (frame_len_h << 8) | frame_len_l\r\n source_addr = datal[3]\r\n func_code = datal[4]\r\n\r\n\r\n\r\n if (dest_addr != 129 and dest_addr != 160):\r\n print(\"dest_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (dest_addr != destination):\r\n print(\"dest_addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr < 1 or source_addr > 32):\r\n print(\"source_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr != source):\r\n print(\"source addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != 
constants.FUNC_WRITE and func_code != constants.FUNC_READ):\r\n print(\"Func Code is UNKNWON\")\r\n s = \"%s : Controller %s : Unknown Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != expectedFunction):\r\n print(\"Func Code is UNEXPECTED\")\r\n s = \"%s : Controller %s : Unexpected Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code == constants.FUNC_WRITE and frame_len != 7):\r\n # Reply to Write is always 7 long\r\n print(\"response length is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (len(datal) != frame_len):\r\n print(\"response length MISMATCHES header\")\r\n s = \"%s : Controller %s : Mismatch length: %s %s\\n\" % (localtime, loop, len(datal), frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n \"\"\"if (func_code == constants.FUNC_READ and expectedLength !=len(datal) ):\r\n # Read response length is wrong\r\n print(\"response length not EXPECTED value\")\r\n print(len(datal))\r\n print(datal)\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\"\"\"\r\n if (badresponse == 0):\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n assert 0, \"Un-supported protocol found %s\" % protocol", "def test_scp_handler_return_dataset_no_status(self):\n def handle(event):\n return Dataset()\n\n handlers = [(evt.EVT_C_ECHO, handle)]\n\n self.ae = ae = AE()\n ae.add_supported_context(VerificationSOPClass)\n ae.add_requested_context(VerificationSOPClass)\n scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)\n\n assoc = ae.associate('localhost', 11112)\n assert assoc.is_established\n rsp = assoc.send_c_echo()\n assert rsp.Status == 0x0000\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def get_data(datauri): \n print(datauri)\n nc = netCDF4.Dataset(datauri)\n time = nc.variables['JULD']\n depth = nc.variables['PRES']\n\n checkdepth = 0\n findepth = np.zeros(time.shape[0])\n for i in range (0, depth.shape[0]):\n maxdepth = np.amax(depth[i])\n findepth[i] = maxdepth\n if (maxdepth > checkdepth):\n dd=i\n checkdepth = maxdepth\n maxdepth = findepth[dd]\n \n temperature = nc.variables['TEMP'][dd] \n tempadj=nc.variables['TEMP_ADJUSTED'][dd]\n depthnew = nc.variables['PRES'][dd] \n depthadj = nc.variables['PRES_ADJUSTED'][dd] \n\n latitude = nc.variables['LATITUDE'][dd]\n longitude = nc.variables['LONGITUDE'][dd]\n\n lonm=nc.variables['LONGITUDE'][dd].mask\n latm=nc.variables['LATITUDE'][dd].mask\n timm=nc.variables['JULD'][dd].mask\n\n if (lonm == True or latm == True):\n longitude=-999.9\n latitude=-999.9\n\n\n out = {}\n out['latitude'] = nc.variables.pop('LATITUDE')[dd]\n out['longitude'] = nc.variables.pop('LONGITUDE')[dd]\n out['temperature'] = nc.variables.pop('TEMP')[dd]\n out['temperatureadj'] = nc.variables.pop('TEMP_ADJUSTED')[dd]\n out['salinity'] = nc.variables.pop('PSAL')[dd]\n out['salinityadj'] = nc.variables.pop('PSAL_ADJUSTED')[dd]\n out['depth'] = nc.variables.pop('PRES')[dd]\n out['depthadj'] = nc.variables.pop('PRES_ADJUSTED')[dd]\n \n return out", "def _async_process_data(self):\n raise NotImplementedError", "def test_check_cds_14(self):\n self.cds1.orientation = \"f\"\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\")\n self.assertEqual(count, 1)", 
"def before_dataobj_create(self, dataobj):", "def __init__(self, points_control, cf_file, year, site_data=None,\n output_request=('lcoe_fcr',), out_fpath=None,\n append=False, mem_util_lim=0.4):\n\n super().__init__(points_control, output_request, site_data=site_data,\n out_fpath=out_fpath, mem_util_lim=mem_util_lim)\n\n self._cf_file = cf_file\n self._year = year\n self._run_attrs['cf_file'] = cf_file\n self._run_attrs['sam_module'] = self._sam_module.MODULE\n\n # initialize output file or append econ data to gen file\n if append:\n self._out_fpath = self._cf_file\n else:\n self._init_fpath()\n\n mode = 'a' if append else 'w'\n self._init_h5(mode=mode)\n self._init_out_arrays()", "def prepare_train_coco_data(args):\n image_dir, annotation_file, data_dir = args.train_coco_image_dir, args.train_coco_annotation_file, args.train_coco_data_dir\n batch_size = args.batch_size\n basic_model = args.basic_model\n num_roi = args.num_roi\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n anchor_files = []\n gt_classes = []\n gt_bboxes = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, coco.imgs[img_id]['file_name'])) \n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n anchor_files.append(os.path.join(data_dir, os.path.splitext(coco.imgs[img_id]['file_name'])[0]+'_'+basic_model+'_anchor.npz')) \n\n classes = [] \n bboxes = [] \n for ann in coco.imgToAnns[img_id]: \n classes.append(coco_category_to_class[ann['category_id']]) \n bboxes.append([ann['bbox'][1], ann['bbox'][0], ann['bbox'][3]+1, ann['bbox'][2]+1]) \n\n gt_classes.append(classes) \n gt_bboxes.append(bboxes) \n \n print(\"Building the training dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths, batch_size, anchor_files, gt_classes, gt_bboxes, True, True)\n print(\"Dataset built.\")\n return coco, dataset", "def test_check_cds_15(self):\n self.cds1.locus_tag = \"\"\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 2)", "def test_check_cds_6(self):\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\")\n self.assertEqual(count, 0)", "def receiveCallback(self, socket, stateMask):\n # read the PDU header\n pduHeader = self.recv(EGSE.EDENPDU.PDU_HEADER_BYTE_SIZE)\n if pduHeader == None:\n # failure handling was done automatically by derived logic\n return\n # consistency check\n pduHeaderLen = len(pduHeader)\n if pduHeaderLen != EGSE.EDENPDU.PDU_HEADER_BYTE_SIZE:\n LOG_ERROR(\"Read of PDU header failed: invalid size: \" + str(pduHeaderLen), \"EDEN\")\n self.disconnectFromServer()\n return\n pdu = EGSE.EDENPDU.PDU(pduHeader)\n # read the data field for the PDU\n dataFieldLength = pdu.dataFieldLength\n if dataFieldLength > 0:\n dataField = self.recv(dataFieldLength)\n if dataField == None:\n # failure handling was done automatically by derived logic\n return\n # consistency check\n remainingSizeRead = len(dataField)\n if remainingSizeRead != dataFieldLength:\n LOG_ERROR(\"Read of remaining PDU failed: invalid remaining size: \" + str(remainingSizeRead), \"EDEN\")\n self.disconnectFromServer()\n return\n pdu.setDataField(dataField)\n # dispatch depending on pduType and subType\n try:\n if pdu.pduType == EGSE.EDENPDU.PDU_TYPE_TC_A:\n if pdu.subType == EGSE.EDENPDU.SUB_TYPE_SPACE:\n # (TC_A,SPACE)\n 
LOG_INFO(\"EDEN.Client.receiveCallback(TC_A,SPACE)\", \"EDEN\")\n self.notifyTc_aSpace(pdu.field2, pdu.field3)\n elif pdu.subType == EGSE.EDENPDU.SUB_TYPE_SCOE:\n # (TC_A,SCOE)\n LOG_INFO(\"EDEN.Client.receiveCallback(TC_A,SCOE)\", \"EDEN\")\n self.notifyTc_aScoe(pdu.field2, pdu.field3)\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid subType: \" + str(pdu.subType), \"EDEN\")\n LOG(\"PDU = \" + str(pdu), \"EDEN\")\n self.disconnectFromServer()\n elif pdu.pduType == EGSE.EDENPDU.PDU_TYPE_TC_E:\n if pdu.subType == EGSE.EDENPDU.SUB_TYPE_SPACE:\n # (TC_E,SPACE)\n LOG_INFO(\"EDEN.Client.receiveCallback(TC_E,SPACE)\", \"EDEN\")\n tc_eSpacePDU = EGSE.EDENPDU.TC_Espace(pdu.buffer)\n self.notifyTc_eSpace(tc_eSpacePDU.getCCSDSpacket())\n elif pdu.subType == EGSE.EDENPDU.SUB_TYPE_SCOE:\n # (TC_E,SCOE)\n LOG_INFO(\"EDEN.Client.receiveCallback(TC_E,SCOE)\", \"EDEN\")\n tc_eScoePDU = EGSE.EDENPDU.TC_Escoe(pdu.buffer)\n self.notifyTc_eScoe(tc_eScoePDU.getCCSDSpacket())\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid subType: \" + str(pdu.subType), \"EDEN\")\n LOG(\"PDU = \" + str(pdu))\n self.disconnectFromServer()\n elif pdu.pduType == EGSE.EDENPDU.PDU_TYPE_TM:\n if pdu.subType == EGSE.EDENPDU.SUB_TYPE_SPACE:\n # (TM,SPACE)\n LOG_INFO(\"EDEN.Client.receiveCallback(TM,SPACE)\", \"EDEN\")\n tmSpacePDU = EGSE.EDENPDU.TMspace(pdu.buffer)\n self.notifyTmSpace(tmSpacePDU.getCCSDSpacket())\n elif pdu.subType == EGSE.EDENPDU.SUB_TYPE_SCOE:\n # (TM,SCOE)\n LOG_INFO(\"EDEN.Client.receiveCallback(TM,SCOE)\", \"EDEN\")\n tmScoePDU = EGSE.EDENPDU.TMscoe(pdu.buffer)\n self.notifyTmScoe(tmScoePDU.getCCSDSpacket())\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid subType: \" + str(pdu.subType), \"EDEN\")\n LOG(\"PDU = \" + str(pdu), \"EDEN\")\n self.disconnectFromServer()\n elif pdu.pduType == EGSE.EDENPDU.PDU_TYPE_CMD:\n if pdu.subType == EGSE.EDENPDU.SUB_TYPE_ANSW:\n # (CMD,ANSW)\n LOG_INFO(\"EDEN.Client.receiveCallback(CMD,ANSW)\", \"EDEN\")\n self.notifyCmdAnsw(pdu.getDataField().tostring())\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid subType: \" + str(pdu.subType), \"EDEN\")\n LOG(\"PDU = \" + str(pdu), \"EDEN\")\n self.disconnectFromServer()\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid pduType: \" + str(pdu.pduType), \"EDEN\")\n LOG(\"PDU = \" + str(pdu), \"EDEN\")\n self.disconnectFromServer()\n except Exception as ex:\n LOG_ERROR(\"Processing of received PDU failed: \" + str(ex), \"EDEN\")\n self.disconnectFromServer()", "def ncwrt_retrieval_config( retr_setup, outname=None ):\n\n #-- set name of file to be generated\n act_outname = outname if outname!=None else 'retrconfig.nc'\n msg = \"Start writing configuration file ***{}***...\".format(act_outname)\n FileLogger.info(msg)\n\n\n #-- compression settings\n zlev = retr_setup.zlev\n use_zlib = retr_setup.use_zlib\n\n #--\n schedule_dct = retr_setup.schedule_dct\n statevector = retr_setup.prstate\n #-- turn list into array\n sim_typ = np.array(schedule_dct['sim_typ'], dtype=np.int32)\n timepts = schedule_dct['date_utc']\n nstvar,npts = statevector.shape\n #-- overpass geometries SZA,SAA,VZA,VAA\n ivgeom = np.empty((npts,4), dtype=np.float64)\n ivgeom[:,0] = schedule_dct['sza']\n ivgeom[:,1] = schedule_dct['saa']\n ivgeom[:,2] = schedule_dct['vza']\n ivgeom[:,3] = schedule_dct['vaa']\n\n #-- temporal settings, create time-values (time-since)\n time_start, time_end = datelst_get_month_aligned_bounds(timepts)\n time_coverage_start = time_start.strftime('%Y-%m-%dT%H:%M:%S')\n time_coverage_end = 
time_end.strftime('%Y-%m-%dT%H:%M:%S')\n ref_time = dt.datetime(timepts[0].year,1,1) #January1st in year of first point in time\n time_unit = 'seconds since {}'.format(ref_time.strftime('%Y-%m-%dT%H:%M:%S'))\n time_values = nc4.date2num(timepts, time_unit)\n\n #-- ensure directory exists\n mkdirp_smart(os.path.dirname(act_outname))\n\n #-- open file pointer\n ncfp = nc4.Dataset(act_outname, 'w')\n #-- add dimensions\n d1 = ncfp.createDimension('npoints',npts)\n d2 = ncfp.createDimension('ngeo',4)\n\n #-- time-value\n ncvar = ncfp.createVariable( 'time', statevector.dtype, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('standard_name','time')\n ncvar.setncattr('long_name','time')\n ncvar.setncattr('units', time_unit)\n ncvar[:] = time_values[:]\n\n #-- simulation type\n ncvar = ncfp.createVariable( 'sim_typ', sim_typ.dtype, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar[:] = sim_typ[:]\n ncvar.setncattr('long_name','simulation_type')\n ncvar.setncattr('comment', 'integer value which is to be bit-interpreted')\n ncvar.setncattr('nobits_set', 'time-point with other state')\n ncvar.setncattr('bit0_is_set', 'time-point for S1 simulation')\n ncvar.setncattr('bit1_is_set', 'time-point for S2 simulation')\n ncvar.setncattr('bit2_is_set', 'time-point for S1A simulation')\n ncvar.setncattr('bit3_is_set', 'time-point for S1B simulation')\n ncvar.setncattr('bit4_is_set', 'time-point for S2A simulation')\n ncvar.setncattr('bit5_is_set', 'time-point for S2B simulation')\n \n #-- illumination-view geometry\n ncvar = ncfp.createVariable( 'ivgeom', ivgeom.dtype, ('npoints','ngeo'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('sza','igeo: 0')\n ncvar.setncattr('saa','igeo: 1')\n ncvar.setncattr('vza','igeo: 2')\n ncvar.setncattr('vaa','igeo: 3')\n ncvar[:,:] = ivgeom[:,:]\n \n #-- global attributes\n ncfp.setncattr('creator_name',\"The Inversion Lab, Hamburg, Germany\")\n ncfp.setncattr('creator_email', \"Michael.Vossbeck(at)Inversion-Lab.com\")\n ncfp.setncattr('netcdf_libversion',\"{}\".format(nc4.__netcdf4libversion__))\n ncfp.setncattr('date_created',\"{}\".format(dt.datetime.utcnow().isoformat()))\n ncfp.setncattr('time_coverage_start',time_coverage_start)\n ncfp.setncattr('time_coverage_end',time_coverage_end)\n\n #-- close file pointer\n ncfp.close()\n\n # logging\n msg = \"...writing ***{}*** DONE\".format(act_outname)\n FileLogger.info(msg)", "def check_condor(self):\n\n self.cluster=self.info['mw_run']['1']\n self.norm_with_cross=self.info['mw_run']['4']\n self.condor_req=self.info['mw_run']['11']\n\n #type is automaticaly updated now\n #self.cluster=int(condor)\n #if norm_with_cross==\"F\":\n # self.norm_with_cross=0\n #else:\n # self.norm_with_cross=1", "def receiveCallback(self, socket, stateMask):\n # read the PDU header\n pduHeader = self.recv(EGSE.EDENPDU.PDU_HEADER_BYTE_SIZE)\n if pduHeader == None:\n # failure handling was done automatically by derived logic\n return\n # consistency check\n pduHeaderLen = len(pduHeader)\n if pduHeaderLen != EGSE.EDENPDU.PDU_HEADER_BYTE_SIZE:\n LOG_ERROR(\"Read of PDU header failed: invalid size: \" + str(pduHeaderLen))\n self.disconnectClient()\n return\n pdu = EGSE.EDENPDU.PDU(pduHeader)\n # read the data field for the PDU\n dataFieldLength = pdu.dataFieldLength\n if dataFieldLength > 0:\n dataField = self.recv(dataFieldLength)\n if dataField == None:\n # failure handling was done automatically by derived logic\n return\n # consistency check\n remainingSizeRead = len(dataField)\n if remainingSizeRead != 
dataFieldLength:\n LOG_ERROR(\"Read of remaining PDU failed: invalid remaining size: \" + str(remainingSizeRead))\n self.disconnectClient()\n return\n pdu.setDataField(dataField)\n # dispatch depending on pduType and subType\n try:\n if pdu.pduType == EGSE.EDENPDU.PDU_TYPE_TC:\n if pdu.subType == EGSE.EDENPDU.SUB_TYPE_SPACE:\n # (TC,SPACE)\n LOG_INFO(\"EDEN.Server.receiveCallback(TC,SPACE)\")\n tcSpacePDU = EGSE.EDENPDU.TCspace(pdu.buffer)\n if self.notifyTcSpace(tcSpacePDU.getCCSDSpacket()):\n # forwarding OK\n self.sendTc_eSpace(tcSpacePDU, 0)\n self.sendTc_aSpace(0, tcSpacePDU.tcIdentificationWord)\n else:\n # forwarding failed\n self.sendTc_eSpace(tcSpacePDU, 1)\n self.sendTc_aSpace(1, tcSpacePDU.tcIdentificationWord)\n elif pdu.subType == EGSE.EDENPDU.SUB_TYPE_SCOE:\n # (TC,SCOE)\n LOG_INFO(\"EDEN.Server.receiveCallback(TC,SCOE)\")\n tcScoePDU = EGSE.EDENPDU.TCscoe(pdu.buffer)\n if self.notifyTcScoe(tcScoePDU.getCCSDSpacket()):\n # forwarding OK\n self.sendTc_eScoe(tcScoePDU, 0)\n self.sendTc_aScoe(0, tcScoePDU.tcIdentificationWord)\n else:\n # forwarding failed\n self.sendTc_eScoe(tcScoePDU, 1)\n self.sendTc_aScoe(1, tcScoePDU.tcIdentificationWord)\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid subType: \" + str(pdu.subType))\n LOG(\"PDU = \" + str(pdu))\n self.disconnectClient()\n elif pdu.pduType == EGSE.EDENPDU.PDU_TYPE_CMD:\n if pdu.subType == EGSE.EDENPDU.SUB_TYPE_EXEC:\n # (CMD,EXEC)\n LOG_INFO(\"EDEN.Server.receiveCallback(CMD,EXEC)\")\n self.notifyCmdExec(pdu.getDataField().tostring())\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid subType: \" + str(pdu.subType))\n LOG(\"PDU = \" + str(pdu))\n self.disconnectClient()\n else:\n LOG_ERROR(\"Read of PDU header failed: invalid pduType: \" + str(pdu.pduType))\n LOG(\"PDU = \" + str(pdu))\n self.disconnectClient()\n except Exception as ex:\n LOG_ERROR(\"Processing of received PDU failed: \" + str(ex))\n self.disconnectClient()", "def on_ctcp(self, raw_msg, source, msg, **kwargs):", "def test_scp_callback_return_dataset_no_status(self):\n self.scp = DummyVerificationSCP()\n self.scp.status = Dataset()\n self.scp.start()\n\n ae = AE()\n ae.add_requested_context(VerificationSOPClass)\n assoc = ae.associate('localhost', 11112)\n assert assoc.is_established\n rsp = assoc.send_c_echo()\n assert rsp.Status == 0x0000\n assoc.release()\n self.scp.stop()", "def check_config(self):\n assert 'AUTO' in self.config\n assert 'LAT' in self.config\n assert 'LON' in self.config\n assert 'ALL_CHANNELS' in self.config\n for key in self.extract_servo_channels():\n assert 'STATUS' in self.config[key]\n self.config[key]['STATUS'] = float(self.config[key]['STATUS'])\n assert 0.0 <= self.config[key]['STATUS'] <= 1.0\n\n if not 'SUNRISE_BUFFER' in self.config[key]:\n self.config[key]['SUNRISE_BUFFER'] = 0\n if not 'SUNSET_BUFFER' in self.config[key]:\n self.config[key]['SUNSET_BUFFER'] = 0", "def check_cdfIntegrity(self, step):\n # Selecting bins automatically:\n x_max = self.onpower_train.max().values[0]\n x_min = 0\n step = 1\n x_onpower = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n x_max = 0\n x_min = self.offpower_train.min().values[0]\n step = 1\n x_offpower = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n x_max = self.duration_train.max().values[0]\n x_min = 0\n step = 1\n x_duration = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n # Evaluating score for:\n # Onpower\n y_onpower = self.__pdf2(self.onpower, x_onpower)\n print(\"Onpower cdf: \" + str(y_onpower.sum()))\n\n # Offpower\n y_offpower = self.__pdf2(self.offpower, 
x_offpower)\n print(\"Offpower cdf: \" + str(y_offpower.sum()))\n\n # duration\n y_duration = self.__pdf2(self.duration, x_duration)\n print(\"Duration cdf: \" + str(y_duration.sum()))\n\n # Plots:\n # fig1 = plt.figure()\n # ax1 = fig1.add_subplot(311)\n # ax2 = fig1.add_subplot(312)\n # ax3 = fig1.add_subplot(313)\n\n # ax1.plot(x_onpower, y_onpower)\n # ax1.set_title(\"PDF CDF: Onpower\")\n # ax1.set_ylabel(\"density\")\n # ax1.set_xlabel(\"Watts\")\n\n # ax2.plot(x_offpower, y_offpower)\n # ax2.set_title(\" PDF CDF: Offpower\")\n # ax2.set_ylabel(\"denisty\")\n # ax2.set_xlabel(\"Watts\")\n\n # ax3.plot(x_duration, y_duration)\n # ax3.set_title(\"PDF CDF: Duration\")\n # ax3.set_ylabel(\"density\")\n # ax3.set_xlabel(\"Seconds\")", "def testCC(self):\n self.assertEqual(\n self.cc,\n self.cd.cc\n )", "def validateChanBin(self):\n\n casalog.origin(\"ParallelDataHelper\")\n \n retval = True\n \n # Get the parameter name, which depends on the task calling this class\n parname = self.getChanAvgParamName()\n casalog.post('Channel average parameter is called %s'%parname,'DEBUG1')\n if parname == None:\n retval = False\n \n elif self.__args.has_key(parname):\n fblist = self.__args[parname]\n if isinstance(fblist,list): \n \n if fblist.__len__() > 1:\n if self.__spwList == None: \n msTool = mstool()\n msTool.open(self.__args['vis'])\n spwsel = self.__args['spw'] \n msTool.msselect({'spw':spwsel})\n ddInfo = msTool.getspectralwindowinfo()\n self.__spwList = [info['SpectralWindowId'] for info in ddInfo.values()]\n msTool.close()\n \n if self.__spwList.__len__() != fblist.__len__():\n retval = False\n raise ValueError, 'Number of %s is different from the number of spw' %parname \n \n\n return retval", "def test_check_cds_12(self):\n self.cds1.stop = -1\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\")\n self.assertEqual(count, 1)", "def test_check_cds_16(self):\n self.cds1.locus_tag = \"ABCXYZ\"\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def data_received(self, data):\n pass", "def dataValidation(self, in_nc):\r\n vars_oi_index = None\r\n data_nc = NET.Dataset(in_nc)\r\n dims = list(data_nc.dimensions)\r\n if dims not in self.dims_oi:\r\n raise Exception(self.errorMessages[1])\r\n\r\n vars = list(data_nc.variables)\r\n\r\n if vars == self.vars_oi[0]:\r\n vars_oi_index = 0\r\n elif vars == self.vars_oi[1]:\r\n vars_oi_index = 1\r\n elif vars == self.vars_oi[2]: # Line Added/Modified CJB 20190108\r\n vars_oi_index = 2 # Line Added/Modified CJB 20190108\r\n else: \r\n raise Exception(self.errorMessages[2])\r\n\r\n return vars_oi_index", "def read_mmclx_save_nc(mmclx_filename, nc_filename):\n\n # Open .mmclx file\n mmclx = xr.open_dataset(mmclx_filename)\n\n # Create .nc file with necessary variables\n ncfile = mmclx[\n [\n \"Ze\",\n \"VEL\",\n \"LDR\",\n \"RHO\",\n \"DPS\",\n \"NyquistVelocity\",\n \"nave\",\n \"nfft\",\n \"prf\",\n \"zrg\",\n \"drg\",\n ]\n ]\n # Invert order of dimensions\n ncfile = ncfile.transpose()\n\n # Convert + change attributes of time dimension\n ncfile[\"time\"] = (\n \"time\",\n pd.to_datetime(ncfile.time.values, unit=\"s\"),\n {\"long_name\": \"Time in epoch reference\", \"standard_name\": \"time\"},\n )\n # Change attributes of range dimension\n ncfile[\"range\"] = ncfile.range.assign_attrs(\n {\n \"long_name\": \"Range from antenna to the centre of each range gate\",\n \"standard_name\": \"range\",\n }\n )\n\n # Create 
additional instrument parameters\n ncfile[\"pulse_width\"] = float(\n ncfile.hrd[\n (ncfile.hrd.find(\"\\nPULSE_WIDTH:\") + 15) : ncfile.hrd.find(\n \"\\nRX_PULSEWIDTH:\"\n )\n ]\n )\n ncfile[\"pulse_width\"] = ncfile.pulse_width.assign_attrs(\n {\n \"long_name\": \"Pulse Width\",\n \"units\": \"s\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"hrd['PULSE_WIDTH']\",\n }\n )\n ncfile[\"prt\"] = 2e-4\n ncfile[\"prt\"] = ncfile.prt.assign_attrs(\n {\n \"long_name\": \"Pulse repetition time\",\n \"units\": \"s\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"\",\n }\n )\n ncfile[\"frequency\"] = 35e9\n ncfile[\"frequency\"] = ncfile.frequency.assign_attrs(\n {\n \"long_name\": \"Radiation Frequency\",\n \"units\": \"s^-1\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"\",\n }\n )\n ncfile[\"latitude\"] = float(ncfile.Latitude[:-1])\n ncfile[\"latitude\"] = ncfile.latitude.assign_attrs(\n {\n \"long_name\": \"Latitude\",\n \"units\": \"degrees_north\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"Latitude\",\n }\n )\n ncfile[\"longitude\"] = float(ncfile.Longitude[:-1])\n ncfile[\"longitude\"] = ncfile.longitude.assign_attrs(\n {\n \"long_name\": \"Longitude\",\n \"units\": \"degrees_east\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"Longitude\",\n }\n )\n ncfile[\"altitude\"] = float(ncfile.Altitude[:-1])\n ncfile[\"altitude\"] = ncfile.altitude.assign_attrs(\n {\n \"long_name\": \"Altitude\",\n \"units\": \"m\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"Altitude\",\n }\n )\n\n # Change names of variables\n ncfile = ncfile.rename_vars(\n {\n \"Ze\": \"filtered_reflectivity\",\n \"VEL\": \"filtered_velocity\",\n \"LDR\": \"filtered_linear_depolarization_ratio\",\n \"RHO\": \"cross_correlation_ratio\",\n \"DPS\": \"differential_phase\",\n \"NyquistVelocity\": \"nyquist_velocity\",\n \"nave\": \"n_samples\",\n \"nfft\": \"n_fft\",\n \"zrg\": \"n_range_gates\",\n \"drg\": \"range_resolution\",\n }\n )\n\n # Convert dBZ variables to dBZ\n ncfile.filtered_reflectivity.values = 10 * np.log10(\n ncfile.filtered_reflectivity.values\n )\n ncfile.filtered_linear_depolarization_ratio.values = 10 * np.log10(\n ncfile.filtered_linear_depolarization_ratio.values\n )\n\n # Change attributes of variables\n ncfile[\"filtered_reflectivity\"] = ncfile.filtered_reflectivity.assign_attrs(\n {\n \"units\": \"dBZ\",\n \"valid_range\": ncfile.filtered_reflectivity.attrs.pop(\"yrange\"),\n \"standard_name\": \"Ze\",\n \"long_name\": \"Filtered Equivalent Reflectivity Factor\",\n }\n )\n ncfile[\"filtered_velocity\"] = ncfile.filtered_velocity.assign_attrs(\n {\n \"units\": \"m * s^-1\",\n \"valid_range\": ncfile.filtered_velocity.attrs.pop(\"yrange\"),\n \"standard_name\": \"VEL\",\n \"long_name\": \"Filtered Mean Doppler Velocity\",\n }\n )\n ncfile[\n \"filtered_linear_depolarization_ratio\"\n ] = ncfile.filtered_linear_depolarization_ratio.assign_attrs(\n {\n \"units\": \"dB\",\n \"valid_range\": ncfile.filtered_linear_depolarization_ratio.attrs.pop(\n \"yrange\"\n ),\n \"standard_name\": \"LDR\",\n \"long_name\": \"Filtered Linear De-Polarization Ratio\",\n }\n )\n ncfile[\n \"cross_correlation_ratio\"\n ] = ncfile.cross_correlation_ratio.assign_attrs(\n {\n \"units\": \"unitless\",\n \"valid_range\": ncfile.cross_correlation_ratio.attrs.pop(\"yrange\"),\n \"standard_name\": \"RHO\",\n \"long_name\": \"Co-Cross Correlation Ratio\",\n }\n )\n ncfile[\"differential_phase\"] = 
ncfile.differential_phase.assign_attrs(\n {\n \"units\": \"degrees\",\n \"valid_range\": ncfile.differential_phase.attrs.pop(\"yrange\"),\n \"standard_name\": \"DPS\",\n \"long_name\": \"Differential Phase\",\n }\n )\n ncfile[\"nyquist_velocity\"] = ncfile.nyquist_velocity.assign_attrs(\n {\n \"units\": \"m * s^-1\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"NyquistVelocity\",\n \"long_name\": \"Nyquist Velocity\",\n }\n )\n ncfile[\"n_samples\"] = ncfile.n_samples.assign_attrs(\n {\n \"units\": \"unitless\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"nave\",\n \"long_name\": \"Number of spectral averages used to compute moments\",\n }\n )\n ncfile[\"n_fft\"] = ncfile.n_fft.assign_attrs(\n {\n \"units\": \"unitless\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"nfft\",\n \"long_name\": \"Number of FFT points\",\n }\n )\n ncfile[\"prf\"] = ncfile.prf.assign_attrs(\n {\n \"units\": \"Hz\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"prf\",\n \"long_name\": \"Pulse Repetition Frequency\",\n }\n )\n ncfile[\"n_range_gates\"] = ncfile.n_range_gates.assign_attrs(\n {\n \"units\": \"unitless\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"zrg\",\n \"long_name\": \"Number of range gates\",\n }\n )\n ncfile[\"range_resolution\"] = ncfile.range_resolution.assign_attrs(\n {\n \"units\": \"m\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"drg\",\n \"long_name\": \"Range resolution\",\n }\n )\n\n # Remove unnecessary attributes of variables\n for var in ncfile.variables:\n for attr in [\"axis\", \"db\", \"unit\"]:\n if attr in ncfile[var].attrs.keys():\n del ncfile[var].attrs[attr]\n\n # Remove unnecessary global attributes\n for attr in [\"Altitude\", \"Latitude\", \"Longitude\", \"ppar\", \"hrd\"]:\n del ncfile.attrs[attr]\n\n # Reorder variables\n ncfile = ncfile[\n [\n \"filtered_reflectivity\",\n \"filtered_velocity\",\n \"filtered_linear_depolarization_ratio\",\n \"cross_correlation_ratio\",\n \"differential_phase\",\n \"nyquist_velocity\",\n \"n_fft\",\n \"prf\",\n \"prt\",\n \"n_range_gates\",\n \"range_resolution\",\n \"n_samples\",\n \"pulse_width\",\n \"frequency\",\n \"latitude\",\n \"longitude\",\n \"altitude\",\n ]\n ]\n\n # Save .nc file\n ncfile.to_netcdf(\n nc_filename,\n unlimited_dims=\"time\",\n encoding={\"time\": {\"units\": \"seconds since 1970-01-01 00:00:00\"}},\n )", "def test_netCDF_sample_dimension_groups(self):\n c = cfdm.Count()\n\n c.nc_set_sample_dimension(\"ncvar\")\n\n with self.assertRaises(ValueError):\n c.nc_set_sample_dimension_groups([\"/forecast\"])\n\n attrs = c.nc_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertFalse(attrs)\n\n attrs = c.nc_clear_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertFalse(attrs)\n\n attrs = c.nc_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertFalse(attrs)\n\n c.nc_set_sample_dimension_groups([\"forecast\", \"model\"])\n attrs = c.nc_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertEqual(attrs, (\"forecast\", \"model\"))\n self.assertEqual(c.nc_get_sample_dimension(), \"/forecast/model/ncvar\")\n\n attrs = c.nc_clear_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertEqual(attrs, (\"forecast\", \"model\"))\n\n attrs = c.nc_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertFalse(attrs)\n 
self.assertEqual(c.nc_get_sample_dimension(), \"ncvar\")\n\n c.nc_set_sample_dimension(\"ncvar\")\n attrs = c.nc_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertEqual(attrs, ())\n\n c.nc_set_sample_dimension(\"/ncvar\")\n attrs = c.nc_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertFalse(attrs)\n\n c.nc_set_sample_dimension(\"/forecast/model/ncvar\")\n attrs = c.nc_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertEqual(attrs, (\"forecast\", \"model\"))\n\n c.nc_del_sample_dimension()\n attrs = c.nc_sample_dimension_groups()\n self.assertIsInstance(attrs, tuple)\n self.assertFalse(attrs)\n\n with self.assertRaises(ValueError):\n c.nc_set_sample_dimension_groups([\"forecast\", \"model\"])", "def has_fcc(self):\n raise NotImplementedError", "def check_data(self):\n super().check_data()\n\n for session_index, session_data in enumerate(self._data_to_analyse):\n if session_data.DATA_FORMAT != \"PyMEICA\":\n self.invalid_data_help = f\"Non PyMEICA format compatibility not yet implemented: \" \\\n f\"{session_data.DATA_FORMAT}\"\n return False\n\n return True", "def _handleRequestCableCheckStatus(self, data):\r\n print(\"\\\"Request Cable Check Status\\\" received\")\r\n self.whitebeet.v2gParseRequestCableCheckStatus(data)\r\n try:\r\n self.whitebeet.v2gSetDcCableCheckStatus(True)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))", "def write_to_netCDF(nc_filename, data,\n ncformat='NETCDF4_CLASSIC',\n all_variables=False,\n verbose=True):\n ncfile = Dataset(nc_filename,'w', format=ncformat, clobber=True)\n for dd,dim in enumerate(data['dims']):\n ncfile.createDimension(data['dimname'][dd],dim)\n for vv,varname in enumerate(data['varn']):\n if all_variables:\n newvar = ncfile.createVariable(varname,\n data['vardtype'][vv],\n data['vardims'][vv])\n newvar[:] = data['data'][vv]\n newvar.units = data['units'][vv]\n else:\n if varname in core_variables:\n newvar = ncfile.createVariable(varname,\n data['vardtype'][vv],\n data['vardims'][vv],\n fill_value=data['fillValue'])\n newvar[:] = data['data'][vv]\n if verbose:\n print(varname)\n print(newvar[newvar == np.nan])\n newvar[newvar == np.nan] = data['fillValue']\n newvar.units = data['units'][vv]\n ncfile.createDimension('nchars',19)\n newvar[:] = data['time']\n ncfile.description = data['description']\n ncfile.station = data['station']\n ncfile.sensor = data['sensor']\n ncfile.latitude = data['latitude']\n ncfile.longitude = data['longitude']\n ncfile.altitude = data['altitude']\n ncfile.createdon = datetime.now().strftime(standard_datetime_fmt)\n ncfile.createdby = data['author']\n ncfile.close()", "def test_cds_invalid_coordinates(self):\n for i in (-10, -1, 6, 100):\n self.assertIsNone(self.t.cds_coordinate_to_chromosome(i))\n self.assertIsNone(self.t.cds_coordinate_to_transcript(i))", "def test_cds_invalid_coordinates(self):\n for i in (-10, -1, 6, 100):\n self.assertIsNone(self.t.cds_coordinate_to_chromosome(i))\n self.assertIsNone(self.t.cds_coordinate_to_transcript(i))", "def test_cds_invalid_coordinates(self):\n for i in (-10, -1, 4, 100):\n self.assertIsNone(self.t.cds_coordinate_to_chromosome(i))\n self.assertIsNone(self.t.cds_coordinate_to_transcript(i))" ]
[ "0.60441834", "0.5924442", "0.5870361", "0.56942517", "0.5658238", "0.56382203", "0.5624735", "0.5600587", "0.5563595", "0.55417067", "0.54565823", "0.5366081", "0.5311197", "0.53014123", "0.5301123", "0.52937174", "0.5290408", "0.5289225", "0.52625626", "0.52489024", "0.5247772", "0.5214412", "0.52054906", "0.518749", "0.51700234", "0.5149499", "0.5134563", "0.5132516", "0.5125593", "0.51050097", "0.51018405", "0.50990194", "0.50936776", "0.50895715", "0.5049416", "0.5046888", "0.49993777", "0.49949896", "0.49900338", "0.4978755", "0.49786028", "0.49715045", "0.49519196", "0.4944707", "0.49330285", "0.49240318", "0.49134305", "0.49129245", "0.49103725", "0.4908793", "0.4900931", "0.48922437", "0.48884425", "0.48783892", "0.48675838", "0.48479193", "0.48439354", "0.48334563", "0.48333347", "0.48311105", "0.48261854", "0.47944984", "0.47905108", "0.4789384", "0.4787438", "0.47730207", "0.47663775", "0.4765562", "0.47643885", "0.47638428", "0.4762133", "0.4758218", "0.4745037", "0.47180137", "0.47176406", "0.47173542", "0.47163585", "0.47104806", "0.4694007", "0.46923023", "0.46894476", "0.46883446", "0.4680177", "0.4679541", "0.46788797", "0.46760118", "0.46724916", "0.4670996", "0.46707004", "0.46670395", "0.4664908", "0.46644863", "0.46635747", "0.46634686", "0.4659963", "0.465784", "0.46468505", "0.46445024", "0.46445024", "0.46430936" ]
0.74189216
0
Test solar data handling from a CC data file, with the clearsky ratio calculated using clearsky GHI from the NSRDB h5 file.
def test_solar_cc():
    # Features: the clearsky ratio plus the two fields it is derived from
    # (surface downwelling shortwave, rsds, and clearsky GHI).
    features = ['clearsky_ratio', 'rsds', 'clearsky_ghi']
    input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')]
    nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5')

    # Read the target lat/lon and grid shape from the CC netCDF file;
    # longitudes are shifted from the 0..360 to the -180..180 convention.
    with xr.open_mfdataset(input_files) as fh:
        min_lat = np.min(fh.lat.values)
        min_lon = np.min(fh.lon.values) - 360
        target = (min_lat, min_lon)
        shape = (len(fh.lat.values), len(fh.lon.values))

    # Without an NSRDB source file the clearsky_ghi feature cannot be
    # computed, so handler initialization is expected to fail.
    with pytest.raises(AssertionError):
        handler = DataHandlerNCforCC(input_files, features=features,
                                     target=target, shape=shape,
                                     val_split=0.0,
                                     worker_kwargs=dict(max_workers=1))

    handler = DataHandlerNCforCC(input_files, features=features,
                                 nsrdb_source_fp=nsrdb_source_fp,
                                 target=target, shape=shape,
                                 temporal_slice=slice(0, 1),
                                 val_split=0.0,
                                 worker_kwargs=dict(max_workers=1))

    cs_ratio = handler.data[..., 0]
    ghi = handler.data[..., 1]
    cs_ghi = handler.data[..., 2]
    cs_ratio_truth = ghi / cs_ghi

    # The clearsky ratio must be a valid fraction (GHI below clearsky GHI)
    # and must match the ratio recomputed from the handler's own fields.
    assert cs_ratio.max() < 1
    assert cs_ratio.min() > 0
    assert (ghi < cs_ghi).all()
    assert np.allclose(cs_ratio, cs_ratio_truth)

    # Build a KDTree over the NSRDB meta so each CC grid cell can be
    # matched to its nearest NSRDB site.
    with Resource(nsrdb_source_fp) as res:
        meta = res.meta
        tree = KDTree(meta[['latitude', 'longitude']])
        cs_ghi_true = res['clearsky_ghi']

        # check a few sites against NSRDB source file
        for i in range(4):
            for j in range(4):
                test_coord = handler.lat_lon[i, j]
                _, inn = tree.query(test_coord)

                assert np.allclose(cs_ghi_true[0:48, inn].mean(),
                                   cs_ghi[i, j])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', regexp = regexham , read_ham = True)\n #print ciffci.calc_overlap(cifdoci)\n #print e.get_groundstate('00000000000011|00000000000011') \n\n psir = rp.PsiReader('psi0_output10.dat', isbig = False, numorbs = -1 , read_ints = False)\n\n detlist = dw.cimain(psir.values['nalpha'],psir.values['nbeta'], psir.values['norb'], [range(1,psir.values['nalpha']+psir.values['nbeta']), []], [] , fname = 'determinants.dat' ,ref = [lambda x , y , z : psir.get_hf_orbs()] , add_frozen = 0, write = False) #CISDDOCI\n count = 0\n for det in detlist:\n for det2 in detlist:\n #+ because the eigenvectors have already a different phasefactor of 1.\n if abs(ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) - ciffcipar.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) ) > 1e-10 :\n print 'difference in hamiltonian row: ' , det[0]+'|'+det[1] , \" col: \" , det2[0]+'|'+det2[1] , 'fci: ', ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) , 'fciaddres: ' , ciffcipar.get_mat_element(det[0]+'|'+det[1],det2[0]+'|'+det2[1]) \n count += 1\n print 'There were ' , count , ' different elements'", "def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source", "def runTest(self):\n ncfile = netCDF4.Dataset(URL)\n assert varname in ncfile.variables.keys()\n var = ncfile.variables[varname]\n assert var.shape == varshape\n data = var[:]\n assert_array_almost_equal(data.min(),varmin)\n assert_array_almost_equal(data.max(),varmax)\n ncfile.close()\n # test https support (linked curl lib must built with openssl support)\n ncfile = netCDF4.Dataset(URL_https)\n assert(ncfile['sst'].long_name=='Sea Surface Temperature') \n ncfile.close()", "def runTest(self):\n nc = Dataset(self.file)\n data = nc['vl'][-1]\n # check max error of compression\n err = np.abs(data - self.data)\n assert(err.max() < nc['vl'].scale_factor)\n # turn off auto-scaling\n nc.set_auto_maskandscale(False)\n data = nc['vl'][-1]\n 
assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))\n nc.close()", "def nc_to_hdf5_mudis(dataset, config):\n np.warnings.filterwarnings('ignore')\n\n date = datetime.datetime.strptime(dataset.recorddate,\n '%d.%m.%Y') # read date from dateset\n date_name = datetime.datetime.strftime(date,\n '%Y%m%d') # convert date to YYYYMMDD format\n config['date'] = date_name\n\n # Create the directory to save the results\n path = config['str_dir'] + '/radiance/{}/data/'.format(config['date'])\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n # Read time of the file (correct time)\n time = datetime.datetime.strptime(dataset.recordtime, '%H:%M:%S.')\n time = datetime.datetime.time(time)\n\n # convert time to datetime format\n datetime_name = datetime.datetime.combine(date, time)\n new_name = datetime.datetime.strftime(datetime_name, '%Y%m%d_%H%M%S')\n\n # radiance = dataset.variables['data'][:].reshape(113, 1281)\n # wavelength_axis = dataset.variables['xAxis'][:]\n\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(\n config['date'], new_name), 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data',\n data=dataset['data'][:].reshape(113, 1281),\n dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n # del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = dataset.exposuretime\n datos['data'].attrs['NumAver'] = dataset.AVERAGED\n datos['data'].attrs['CCDTemp'] = dataset.detectortemperature\n datos['data'].attrs['NumSingMes'] = dataset.noofaccumulations\n # datos['data'].attrs['ElectrTemp'] = dataset.\n datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n\n chn = np.arange(1, 114)\n datos.create_dataset('/channel', data=chn, dtype=np.float32)\n datos.create_dataset('/wavelength', data=dataset['xAxis'][:])\n\n datos['data'].dims.create_scale(datos['channel'], 'channel')\n datos['data'].dims[0].attach_scale(datos['channel'])\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n # datos['skymap'].dims[0].label = 'channel'\n # datos['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n datos.close()", "def extract_hrc_data(obsid, data_dir):\n#\n#--- extract fits data\n#\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'level=1\\n'\n line = line + 'detector=hrc\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n\n with open('zline', 'w') as fo:\n fo.write(line)\n\n cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > zout'\n os.system(cmd)\n#\n#--- create directories and move the data into them\n#\n cmd = 'mkdir primary secondary'\n os.system(cmd)\n \n cmd = 'mv *dtf1*fits* *fov*fits* ./primary/.'\n os.system(cmd)\n\n cmd = 'mv *bpix1*fits* *evt1*fits* *msk1*fits* *mtl1*fits* \\\n *std_dtfstat1.fits* *std_flt1.fits* ./secondary/.'\n os.system(cmd)\n\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'level=1\\n'\n line = line + 'detector=pcad\\n'\n line = line + 'subdetector=aca\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n\n with open('zline', 'w') as fo:\n 
fo.write(line)\n\n cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > zout'\n os.system(cmd)\n cmd = 'mv *asol*fits* ./primary/.'\n os.system(cmd)\n\n cmd = 'rm -rf *fits* zline zout'\n os.system(cmd)\n\n hdir = data_dir + '/' + str(obsid)\n if os.path.isdir(hdir):\n cmd = 'rm -rf ' + hdir + '/*'\n os.system(cmd)\n else:\n cmd = 'mkdir ' + hdir \n os.system(cmd)\n\n cmd = 'chmod 774 primary/* secondary/*'\n os.system(cmd)\n\n#\n#--- check whether there are duplicated fits files extracted; if so, remove older ones\n#\n h_list = ['dtf1', 'fov1', 'asol1']\n sdir = 'primary'\n remove_duplicate(h_list, sdir)\n\n h_list = ['bpix1', 'evt1', 'msk1', 'mtl1', 'std_dtfstat1', 'std_flt1']\n sdir = 'secondary'\n remove_duplicate(h_list, sdir)\n\n cmd = 'mv primary secondary ' + hdir + '/.'\n os.system(cmd)\n\n cmd = 'rm -rf ' + hdir + '/analysis/* ' \n os.system(cmd)\n\n return check_data_exist(hdir)", "def main_bf_MISR(h5f, output_folder, SPATIAL_RESOLUTION=0.5, VZA_MAX=18, CAMERA='AN'):\n\n # =============================================================================\n # 1. Initialization\n # calculate constant parameters\n # initialize output arrays and output hdf5 file\n # check the number of CERES granules \n # =============================================================================\n\n print(\"-------MISR----->\", h5f)\n print(\"-------FID------<>\", h5f.fid)\n print(\"---->\", type(h5f))\n if type(h5f.fid) is str:\n output_nc_name = h5f.fid.split('/')[-1].replace('TERRA_BF_L1B', 'CLIMARBLE')\n else:\n output_nc_name = h5f.fid.name. \\\n decode(\"utf-8\").split('/')[-1]. \\\n replace('TERRA_BF_L1B', 'CLIMARBLE')\n\n output_nc_name = output_nc_name.replace('.h5', '.nc')\n\n # \n NUM_POINTS = 1 / SPATIAL_RESOLUTION\n NUM_LATS = int(180 / SPATIAL_RESOLUTION)\n NUM_LONS = int(360 / SPATIAL_RESOLUTION)\n\n LAT_EDGES = np.arange(-90.0, 90.0001, SPATIAL_RESOLUTION)\n LON_EDGES = np.arange(-180.0, 180.0001, SPATIAL_RESOLUTION)\n\n # \n orbit_radiance_sum = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_radiance_num = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_nc_out = os.path.join(output_folder, output_nc_name)\n\n\n # =============================================================================\n # 2. 
Main processing\n # Loop through each CERES granule and sort radiances into the corresponding lat/lon bins\n # When encounters an asceding granule, script will move to the next granule\n # =============================================================================\n\n # USE MODIS granules to match first and last time of the descending node\n MISR_blocks = get_descending(h5f, 'MISR.{}'.format(CAMERA))\n if MISR_blocks[0] == 0:\n print(\">> IOError( no available MODIS granule in orbit {} )\".format(bf_file))\n return\n\n # LOAD lat/lon here\n lat = h5f['MISR/Geolocation/GeoLatitude'][:]\n lon = h5f['MISR/Geolocation/GeoLongitude'][:]\n\n # LOAD radiance here\n MISR_bands = ['Blue', 'Green', 'Red', 'NIR']\n rads_all = []\n for iband in MISR_bands:\n rads_all.append(h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, iband)][:])\n\n # SPECIFY data dimension to interpolate SZA/VZA\n rad_shape = (128, 512)\n \n\n # LOOP through MISR blocks (starts from 0)\n for iblk in MISR_blocks:\n\n # INTERPOLATE sza and vza (this part can be replaced by a more accurate function)\n raw_sza = h5f['MISR/Solar_Geometry/SolarZenith'][iblk]\n raw_vza = h5f['MISR/{}/Sensor_Geometry/{}Zenith'.format(CAMERA, ''.join(c.lower() if i==1 else c for i,c in enumerate(CAMERA)))][iblk]\n np.place(raw_sza, raw_sza<0, np.nan)\n np.place(raw_vza, raw_vza<0, np.nan)\n blk_sza = resize(raw_sza, rad_shape)\n blk_vza = resize(raw_vza, rad_shape)\n\n\n # SELECT lat/lon\n idx_geometry = np.where((blk_sza<89.0) & (blk_vza<VZA_MAX))\n select_lat = lat[iblk][idx_geometry]\n select_lon = lon[iblk][idx_geometry]\n\n\n # SELECT spectral radiances here\n # Aggregate 275-m res data to 1.1-km when necessary\n # Separate band by band to allow one (or more) band(s) failure\n for iband, band_name in enumerate(MISR_bands, start=0):\n blk_rad = rads_all[iband][iblk]\n # blk_rad = h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, band_name)][iblk]\n\n if blk_rad.shape == (512, 2048): \n # 275-m res band\n np.place(blk_rad, blk_rad<0, np.nan)\n fnl_blk_rad = np.nanmean(np.reshape(blk_rad, (blk_rad.shape[0]//4, 4, blk_rad.shape[1]//4,4)), axis=(1,3))\n else:\n fnl_blk_rad = blk_rad\n\n\n select_rad = np.nan_to_num(fnl_blk_rad[idx_geometry])\n fnl_idx = np.where((select_rad>0)&(select_rad<1000))[0]\n\n fnl_lat = select_lat[fnl_idx] * -1\n fnl_lon = select_lon[fnl_idx]\n fnl_rad = select_rad[fnl_idx]\n\n try:\n rad_sum, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='sum')\n rad_cnt, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='count')\n\n orbit_radiance_sum[:, :, iband] += rad_sum\n orbit_radiance_num[:, :, iband] += rad_cnt\n except ValueError:\n continue\n\n # =============================================================================\n # 3. 
Save results\n # =============================================================================\n orbit_radiance_num = np.array(orbit_radiance_num, dtype='int16')\n\n coords_lats = np.linspace(90-SPATIAL_RESOLUTION/2, -90+SPATIAL_RESOLUTION/2, NUM_LATS)\n coords_lons = np.linspace(-180+SPATIAL_RESOLUTION/2, 180-SPATIAL_RESOLUTION/2, NUM_LONS)\n\n xr_rad_sum = xr.DataArray(orbit_radiance_sum, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_num = xr.DataArray(orbit_radiance_num, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_sum.encoding['_FillValue'] = 0\n xr_rad_num.encoding['_FillValue'] = 0\n xr_rad_sum.name = 'MISR spec rad sum'\n xr_rad_num.name = 'MISR spec rad num'\n xr_rad_sum.to_netcdf(orbit_nc_out, 'a')\n xr_rad_num.to_netcdf(orbit_nc_out, 'a')\n return orbit_nc_out", "def test_CFCalculation_hdf_files():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n expected_results = [\n CFCoefficient(l=2, m=0, spin_up=-571.68845386399, spin_down=-558.2336974657351, unit='K', convention='Stevens'),\n CFCoefficient(l=4,\n m=0,\n spin_up=-34.982539807305045,\n spin_down=-21.850435868549834,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=0, spin_up=3.8503494779930776, spin_down=2.168215129491561, unit='K',\n convention='Stevens'),\n CFCoefficient(l=6,\n m=-6,\n spin_up=110.50156137060345,\n spin_down=85.58558990378205,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=6, spin_up=110.50156137060345, spin_down=85.58558990378205, unit='K', convention='Stevens')\n ]\n\n cf = CFCalculation()\n cf.readPot('files/cf_calculation/CFdata.hdf')\n cf.readCDN('files/cf_calculation/CFdata.hdf')\n results = cf.performIntegration()\n\n assert results == expected_results", "def read_test_data_file(self, filename):\n if not os.path.isfile(filename):\n raise Error(\"Test data file does not exist or problem with file: \" + filename)\n txt = open(filename, 'r').readlines()\n search_params = {}\n for curlinenum, curline in enumerate(txt):\n if curline.startswith('#END'): # end of search parameters\n break\n search_params[curline.split('=')[0].replace('#', '').strip()] = {\n 'value': curline.split('=')[1].split('/', 1)[0].strip(),\n 'comment': curline.split('/', 1)[1].strip()}\n while not txt[curlinenum].startswith('#1'):\n curlinenum += 1\n hdrline1 = txt[curlinenum]\n while not txt[curlinenum].startswith('#2'):\n curlinenum += 1\n hdrline2 = txt[curlinenum]\n headers1 = [a.strip() for a in hdrline1[2:].split('|')]\n headers2 = [a.strip() for a in hdrline2[2:].split('|')]\n headers = [headers1[i] + ' ' + headers2[i] for i in range(len(headers1))][:-1]\n headers = [a.replace(' mag', 'mag') for a in headers]\n split_at = [a.start() for a in re.finditer('\\|', hdrline1)][:-1]\n split_row = lambda row: [row[i:j].replace('\\n', '') for i, j in zip([0] + split_at, split_at + [None])]\n data = [split_row(row) for row in txt[curlinenum + 2:]]\n df = DataFrame(data, columns=headers)\n df.index = df['id id'].apply(lambda x: x.strip())\n df.index.name = 'NOMAD id'\n columns_to_drop = ['id id']\n df['RAJ2000'] = (df['RA hh mm ss'].apply(lambda x: float(x.split()[0])) +\n df['RA hh mm ss'].apply(lambda x: float(x.split()[1])) / 60. +\n df['RA hh mm ss'].apply(lambda x: float(x.split()[2])) / 3600.) 
* 15.\n columns_to_drop.append('RA hh mm ss')\n dec_sign = lambda x: -1.0 if x.strip()[0] == '-' else 1.0\n df['DEJ2000'] = (df['DEC dd mm ss'].apply(dec_sign) *\n (df['DEC dd mm ss'].apply(lambda x: float(x.split()[0].replace('+', '').replace('-', ''))) +\n df['DEC dd mm ss'].apply(lambda x: float(x.split()[1])) / 60. +\n df['DEC dd mm ss'].apply(lambda x: float(x.split()[2])) / 3600.))\n columns_to_drop.append('DEC dd mm ss')\n columns_to_drop.append('ExtractID id')\n df = df.drop(columns_to_drop, axis=1)\n for column in df: # check first in each column for ability to convert to integer, then float, then leave alone\n if df[column].dtype == object and column != 'Flags hex':\n if df[column][0].strip().lstrip('-').lstrip('+').isdigit():\n df[column] = df[column].apply(np.int)\n elif df[column][0].strip().lstrip('-').lstrip('+').replace('.', '0').isdigit():\n df[column] = df[column].apply(np.float)\n return search_params, df", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = 
float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def F_read_S5P_nc(self,fn,data_fields,data_fields_l2g=[]):\n from netCDF4 import Dataset\n ncid = Dataset(fn,'r')\n outp = {}\n for i in range(len(data_fields)):\n tmp = ncid[data_fields[i]]\n tmpdtype = tmp.dtype\n if not data_fields_l2g:\n varname = tmp.name\n else:\n varname = data_fields_l2g[i]\n if tmpdtype is \"str\":\n outp[varname] = tmp[:]\n else:\n outp[varname] = np.squeeze(tmp[:])\n ## scale factor already applied?! so confusing\n# try:\n# outp[varname] = outp[varname]*tmp.scale_factor\n# if tmp.scale_factor != 1:\n# print(varname+' has a scale_factor of '+'%s'%tmp.scale_factor)\n# except Exception:\n# #print(e)\n# print(varname+' has no scale_factor!')\n if 'time_utc' in outp.keys():\n UTC_matlab_datenum = np.zeros((len(outp['time_utc']),1),dtype=np.float64)\n for i in range(len(outp['time_utc'])):\n tmp = datetime.datetime.strptime(outp['time_utc'][i],'%Y-%m-%dT%H:%M:%S.%fZ')\n UTC_matlab_datenum[i] = (tmp.toordinal()\\\n +tmp.hour/24.\\\n +tmp.minute/1440.\\\n +tmp.second/86400.\\\n +tmp.microsecond/86400/1000000+366.)\n outp['UTC_matlab_datenum'] = np.tile(UTC_matlab_datenum,(1,outp['latc'].shape[1]))\n else: # hcho l2 does not have time_utc\n # the delta_time field of hcho fills all across track position, but ch4 is one per scanline\n if len(outp['delta_time'].shape) == 1:\n outp['delta_time'] = np.tile(outp['delta_time'][...,None],(1,outp['latc'].shape[1]))\n outp['UTC_matlab_datenum'] = (outp['time']+outp['delta_time']/1000.)/86400.+734139.\n \n outp['across_track_position'] = np.tile(np.arange(1.,outp['latc'].shape[1]+1),\\\n (outp['latc'].shape[0],1)).astype(np.int16)\n return outp", "def test_cl_fix_file(mock_get_filepath, cl_file, tmp_path):\n mock_get_filepath.return_value = os.path.join(tmp_path,\n 'fixed_cesm2_waccm_cl.nc')\n fix = Cl(None)\n fixed_file = fix.fix_file(cl_file, tmp_path)\n mock_get_filepath.assert_called_once_with(tmp_path, cl_file)\n fixed_dataset = Dataset(fixed_file, mode='r')\n assert fixed_dataset.variables['lev'].standard_name == (\n 'atmosphere_hybrid_sigma_pressure_coordinate')\n assert fixed_dataset.variables['lev'].formula_terms == (\n 'p0: p0 a: a b: b ps: ps')\n assert fixed_dataset.variables['lev'].units == '1'\n np.testing.assert_allclose(fixed_dataset.variables['a'][:], [1.0, 2.0])\n np.testing.assert_allclose(fixed_dataset.variables['b'][:], [0.0, 1.0])\n np.testing.assert_allclose(fixed_dataset.variables['a_bnds'][:],\n [[0.0, 1.5], [1.5, 3.0]])\n np.testing.assert_allclose(fixed_dataset.variables['b_bnds'][:],\n [[-1.0, 0.5], [0.5, 2.0]])", "def test_compute_Sv_ek80_CW_complex_BB_complex(ek80_cal_path):\n ek80_raw_path = ek80_cal_path / \"2018115-D20181213-T094600.raw\"\n ed = ep.open_raw(ek80_raw_path, sonar_model=\"EK80\")\n ds_Sv = ep.calibrate.compute_Sv(\n ed, waveform_mode=\"CW\", encode_mode=\"complex\"\n )\n assert isinstance(ds_Sv, xr.Dataset)\n ds_Sv = ep.calibrate.compute_Sv(\n ed, waveform_mode=\"BB\", encode_mode=\"complex\"\n )\n assert isinstance(ds_Sv, xr.Dataset)", "def test_CFCalculation_txt_files():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n #Make sure new script produces the same result as old one\n expected_results = [\n CFCoefficient(l=2,\n m=0,\n spin_up=-419.7891726292168,\n 
spin_down=-414.7152560307904,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=4,\n m=0,\n spin_up=-35.92607948104669,\n spin_down=-26.384951772020756,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=0, spin_up=6.522900740505054, spin_down=5.488104692050172, unit='K', convention='Stevens')\n ]\n\n cf = CFCalculation(reference_radius='cdn')\n cf.readPot('files/cf_calculation/VKS.2.0.dat',\n 'files/cf_calculation/VKS.4.0.dat',\n 'files/cf_calculation/VKS.6.0.dat',\n lm=[(2, 0), (4, 0), (6, 0)])\n cf.readCDN('files/cf_calculation/Nd.dat', header=3)\n cf.cdn['RMT'] = 3.138049652\n results = cf.performIntegration()\n\n assert results == expected_results", "def sky_orbits(test=True):\n \n t = Table.read('/home/ana/data/baumgardt_positions.fits')\n \n ind_disterr = ~np.isfinite(t['e_Rsun'])\n t['e_Rsun'][ind_disterr] = 0.1 * t['Rsun'][ind_disterr]\n e_max = np.nanmax(t['e_Rsun'][~ind_disterr])\n ind_cap = t['e_Rsun']>e_max\n t['e_Rsun'][ind_cap] = e_max\n \n clusters = ['NGC 3201', 'NGC 4590', 'NGC 5824', 'NGC 5272', 'NGC 5139', 'NGC 5024']\n #clusters = ['NGC 5824', 'NGC 5024']\n N = len(clusters)\n \n match = dict()\n match['NGC 3201'] = dict(streams=['gjoll'], direction=[-1], nstep=[35], gc_label='NGC\\n3201', gcra_off=0*u.deg, gcdec_off=-13*u.deg, gcl_off=0*u.deg, gcb_off=-13*u.deg, stream_label=['$Gj\\\\\\\" oll$'], stream_ra=[-156*u.deg], stream_dec=[-4.5*u.deg], eq_angle=[-45*u.deg], stream_l=[-148*u.deg], stream_b=[-33*u.deg], gal_angle=[22*u.deg])\n \n match['NGC 4590'] = dict(streams=['fjorm'], direction=[1], nstep=[100], gc_label='NGC\\n4590', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=-13*u.deg, gcb_off=-10*u.deg, stream_label=['$Fj\\\\\\\" orm$'], stream_ra=[-22*u.deg], stream_dec=[66*u.deg], eq_angle=[35*u.deg], stream_l=[110*u.deg], stream_b=[50*u.deg], gal_angle=[-50*u.deg])\n \n match['NGC 5024'] = dict(streams=['sylgr', 'ravi'], direction=[-1, 1], nstep=[300,500], gc_label='NGC\\n5024', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=10*u.deg, gcb_off=-20*u.deg, stream_label=['Sylgr', 'Ravi'], stream_ra=[-70*u.deg, 83*u.deg], stream_dec=[2*u.deg, -47*u.deg], eq_angle=[25*u.deg, 65*u.deg], stream_l=[-110*u.deg, -18.5*u.deg], stream_b=[62*u.deg, -47*u.deg], gal_angle=[30*u.deg, -10*u.deg])\n \n match['NGC 5139'] = dict(streams=['fimbulthul'], direction=[-1], nstep=[70], gc_label='NGC\\n5139', gcra_off=-5*u.deg, gcdec_off=-15*u.deg, gcl_off=0*u.deg, gcb_off=-12*u.deg, stream_label=['Fimbulthul'], stream_ra=[-20*u.deg], stream_dec=[-15*u.deg], eq_angle=[0*u.deg], stream_l=[-20*u.deg], stream_b=[45*u.deg], gal_angle=[0*u.deg])\n \n match['NGC 5272'] = dict(streams=['svol'], direction=[1], nstep=[70], gc_label='NGC\\n5272', gcra_off=-15*u.deg, gcdec_off=10*u.deg, gcl_off=-23*u.deg, gcb_off=-17*u.deg, stream_label=['$Sv\\\\\\\" ol$'], stream_ra=[-2*u.deg], stream_dec=[34*u.deg], eq_angle=[-10*u.deg], stream_l=[55*u.deg], stream_b=[55*u.deg], gal_angle=[-65*u.deg])\n \n match['NGC 5824'] = dict(streams=['triangulum', 'turbio'], direction=[1,1], nstep=[700,1], gc_label='NGC\\n5824', gcra_off=15*u.deg, gcdec_off=-5*u.deg, gcl_off=15*u.deg, gcb_off=-5*u.deg, stream_label=['Triangulum', 'Turbio'], stream_ra=[152*u.deg, 130*u.deg], stream_dec=[32*u.deg, -51*u.deg], eq_angle=[-48*u.deg, 30*u.deg], stream_l=[120*u.deg, -82*u.deg], stream_b=[-31*u.deg, -57*u.deg], gal_angle=[70*u.deg, 105*u.deg])\n \n dt = 0.5*u.Myr\n wangle = 180*u.deg\n ra_off = 120*u.deg\n l_off = 0*u.deg\n \n colors = [mpl.cm.plasma(0.95*x/N) for x in range(N)]\n \n np.random.seed(27529)\n if test:\n 
Nsample = 1\n else:\n Nsample = 100\n \n plt.close()\n fig = plt.figure(figsize=(12,12))\n \n ax0 = fig.add_subplot(211, projection='mollweide')\n ax1 = fig.add_subplot(212, projection='mollweide')\n ax = [ax0, ax1]\n \n for i in range(N):\n #ind = t['Name']== clusters[i]\n ind = t['Name']==clusters[i]\n t_ = t[ind]\n \n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n cgal = c.transform_to(coord.Galactic)\n #w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n color = colors[i]\n alpha_text = 0.8\n \n plt.sca(ax[0])\n plt.plot((c.ra + ra_off).wrap_at(wangle).rad, c.dec.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((c.ra + ra_off + match[clusters[i]]['gcra_off']).wrap_at(wangle).rad, (c.dec + match[clusters[i]]['gcdec_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n plt.sca(ax[1])\n plt.plot((cgal.l + l_off).wrap_at(wangle).rad, cgal.b.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((cgal.l + l_off + match[clusters[i]]['gcl_off']).wrap_at(wangle).rad, (cgal.b + match[clusters[i]]['gcb_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n\n for j in range(len(match[clusters[i]]['direction'])):\n # sample gc positional uncertainties\n for k in range(-1, Nsample):\n if k==-1:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1.5\n alpha = 1\n else:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'] + np.random.randn()*t_['e_Rsun'], pm_ra_cosdec=t_['pmRA_'] + np.random.randn()*t_['e_pmRA_'], pm_dec=t_['pmDE'] + np.random.randn()*t_['e_pmDE'], radial_velocity=t_['RV'] + np.random.randn()*t_['e_RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1\n alpha = 0.1\n \n orbit = ham.integrate_orbit(w0, dt=dt*match[clusters[i]]['direction'][j], n_steps=match[clusters[i]]['nstep'][j])\n orbit_eq = orbit.to_coord_frame(coord.ICRS, galactocentric_frame=gc_frame)\n orbit_gal = orbit.to_coord_frame(coord.Galactic, galactocentric_frame=gc_frame)\n \n \n plt.sca(ax[0])\n dra = (orbit_eq.ra+ra_off).wrap_at(wangle)[1:] - (orbit_eq.ra+ra_off).wrap_at(wangle)[:-1]\n if np.any(np.abs(dra)>180*u.deg):\n pos_break = dra>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_eq.dec.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_eq.dec.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad, orbit_eq.dec.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n plt.sca(ax[1])\n dl = orbit_gal.l.wrap_at(wangle)[1:] - orbit_gal.l.wrap_at(wangle)[:-1]\n if np.any(np.abs(dl)>180*u.deg):\n pos_break = dl>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_gal.b.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_gal.b.rad[ind_break+ipad:], '-', color=color, 
lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad, orbit_gal.b.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n # add streams\n pkl = pickle.load(open('../data/streams/data_{:s}.pkl'.format(match[clusters[i]]['streams'][j]), 'rb'))\n cs = coord.SkyCoord(ra=pkl['dec'][0], dec=pkl['dec'][1], frame='icrs')\n cs_gal = cs.transform_to(coord.Galactic)\n \n plt.sca(ax[0])\n plt.plot((cs.ra+ra_off).wrap_at(wangle).rad, cs.dec.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_ra'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_dec'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['eq_angle'][j].value, ha='center', va='center')\n \n plt.sca(ax[1])\n plt.plot((cs_gal.l+l_off).wrap_at(wangle).rad, cs_gal.b.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_l'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_b'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['gal_angle'][j].value, ha='center', va='center')\n \n \n plt.sca(ax[0])\n plt.grid(ls=':')\n plt.xlabel('R.A. [deg]')\n plt.ylabel('Dec [deg]')\n\n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]-ra_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n plt.sca(ax[1])\n plt.grid(ls=':')\n plt.xlabel('Galactic longitude [deg]')\n plt.ylabel('Galactic latitude [deg]')\n \n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [2,3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]+l_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n \n plt.tight_layout(h_pad=2)\n plt.savefig('../paper/sky_orbits.pdf')", "def Test_data():\n print (\"loading test data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n\n with h5py.File(join(data_root, './data/test_real2.h5')) as f:\n test_real = f['test_real'][:]\n with h5py.File(join(data_root, './data/test_imag2.h5')) as f:\n test_imag = f['test_imag'][:]\n test_real = np.transpose(test_real, (0, 1, 3, 2))\n test_imag = np.transpose(test_imag, (0, 1, 3, 2))\n test_data = test_real+1j*test_imag\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end - time_start))\n return test_data", "def test_CFCalculation_hdf_files_wybourne_convention():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n expected_results = [\n CFCoefficient(l=2,\n m=0,\n spin_up=(-1143.37690772798 + 0j),\n spin_down=(-1116.4673949314702 + 0j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=4,\n m=0,\n spin_up=(-279.86031845844036 + 0j),\n spin_down=(-174.80348694839867 + 0j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=6,\n m=0,\n spin_up=(61.60559164788924 + 0j),\n spin_down=(34.69144207186498 + 0j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=6,\n m=-6,\n spin_up=(116.32750335918315 + 4.696327749935313e-06j),\n 
spin_down=(90.09789430612014 + 3.6373963939901583e-06j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=6,\n m=6,\n spin_up=(116.32750335918315 - 4.696327749935313e-06j),\n spin_down=(90.09789430612014 - 3.6373963939901583e-06j),\n unit='K',\n convention='Wybourne')\n ]\n\n cf = CFCalculation()\n cf.readPot('files/cf_calculation/CFdata.hdf')\n cf.readCDN('files/cf_calculation/CFdata.hdf')\n results = cf.performIntegration(convert=False)\n\n print(results)\n assert results == expected_results", "def test_species_to_sdf_file(self):\n path = os.path.join(ARC_PATH, 'arc', 'testing', 'mol.sdf')\n spc = ARCSpecies(label='NCC', smiles='NCC')\n converter.species_to_sdf_file(spc, path)\n with open(path, 'r') as f:\n sdf_content = f.read()\n expected_sdf = \"\"\"\n RDKit 3D\n\n 10 9 0 0 0 0 0 0 0 0999 V2000\n 1.1517 -0.3760 -0.5231 N 0 0 0 0 0 0 0 0 0 0 0 0\n 0.2893 0.4500 0.3115 C 0 0 0 0 0 0 0 0 0 0 0 0\n -1.1415 -0.0561 0.2592 C 0 0 0 0 0 0 0 0 0 0 0 0\n 1.1386 -1.3376 -0.1854 H 0 0 0 0 0 0 0 0 0 0 0 0\n 2.1151 -0.0555 -0.4352 H 0 0 0 0 0 0 0 0 0 0 0 0\n 0.6517 0.4342 1.3447 H 0 0 0 0 0 0 0 0 0 0 0 0\n 0.3279 1.4855 -0.0414 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.2133 -1.0839 0.6308 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.7870 0.5726 0.8809 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.5327 -0.0332 -0.7636 H 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 1 0\n 1 4 1 0\n 1 5 1 0\n 2 3 1 0\n 2 6 1 0\n 2 7 1 0\n 3 8 1 0\n 3 9 1 0\n 3 10 1 0\nM END\n$$$$\n\"\"\"\n self.assertEqual(sdf_content, expected_sdf)", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] 
* yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def convert_calculations(filename, hdf5_data):\n x1 = []\n\n with open(filename, 'r') as inp:\n for line in inp:\n x1.append(line)\n\n idx = 1\n dset = require_dataset(hdf5_data, structure.H5_ENV_VOLUME, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_VOLUME_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_GRAVITY, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_GRAVITY_ATTR)\n idx += 1\n\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_DEPTH, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_DEPTH_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_WAVE_POINT, (2,), dtype=settings.NEMOH_FLOAT)\n x2 = x1[idx].split()\n dset[0] = float(x2[0])\n dset[1] = float(x2[1])\n set_hdf5_attributes(dset, structure.H5_ENV_WAVE_POINT_ATTR)\n\n idx = 6\n\n num_bodies = int(x1[idx].split()[0])\n\n for i in range(num_bodies):\n\n body = structure.H5_BODIES + structure.H5_BODY_BASE + str(i+1) + '/'\n idx += 2\n\n mesh_x = []\n\n mesh_path = os.path.join(os.path.abspath(os.path.dirname(filename)), str(x1[idx].split()[0]).strip(' \\t\\n\\r'))\n\n with open(mesh_path, 'r') as mesh_file:\n for line in mesh_file:\n mesh_x.append(line)\n\n idx += 1\n x2 = x1[idx].split()\n\n num_points = int(x2[0])\n num_panels = int(x2[1])\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_POINTS, (1, 
), dtype=settings.NEMOH_INT)\n dset[0] = num_points\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_POINTS_ATTR)\n\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_PANELS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_panels\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_PANELS_ATTR)\n\n mesh_idx = 0\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_MESH, (num_points+num_panels+1, 4), dtype=settings.NEMOH_FLOAT)\n mesh_x2 = mesh_x[mesh_idx].split()\n set_hdf5_attributes(dset, structure.H5_BODY_MESH_ATTR)\n\n dset[0, 0] = int(mesh_x2[0])\n dset[0, 1] = int(mesh_x2[1])\n\n for j in range(1, num_points+num_panels+1):\n mesh_idx += 1\n mesh_x2 = mesh_x[mesh_idx].split()\n dset[j, :] = [float(x) for x in mesh_x2[:4]]\n\n if j == num_points:\n mesh_idx += 1\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_FREEDOM_DEGREE, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREEDOM_DEGREE_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = np.array([float(x) for x in x2[:7]])\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_GENERALISED_FORCES, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_GENERALISED_FORCES_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = [float(x) for x in x2[:7]]\n\n idx += 1\n num = int(x1[idx].split()[0])\n for j in range(num):\n idx += 1\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_FREQUENCIES_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[2])\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_DIRECTIONS_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[1])\n\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[2])\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n 
dset[0] = int(x2[0])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? 
\n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? \n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def test_brainvision_data():\n assert_raises(IOError, read_raw_brainvision, vmrk_path)\n assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,\n preload=True, scale=\"foo\")\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw_py = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_path, montage=montage,\n eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n assert_true('RawBrainVision' in repr(raw_py))\n\n assert_equal(raw_py.info['highpass'], 0.)\n assert_equal(raw_py.info['lowpass'], 250.)\n\n picks = pick_types(raw_py.info, meg=False, eeg=True, 
exclude='bads')\n data_py, times_py = raw_py[picks]\n\n # compare with a file that was generated using MNE-C\n raw_bin = Raw(eeg_bin, preload=True)\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_bin, times_bin = raw_bin[picks]\n\n assert_array_almost_equal(data_py, data_bin)\n assert_array_almost_equal(times_py, times_bin)\n\n # Make sure EOG channels are marked correctly\n for ch in raw_py.info['chs']:\n if ch['ch_name'] in eog:\n assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)\n elif ch['ch_name'] == 'STI 014':\n assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)\n elif ch['ch_name'] in raw_py.info['ch_names']:\n assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)\n else:\n raise RuntimeError(\"Unknown Channel: %s\" % ch['ch_name'])\n\n # test loading v2\n read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True,\n response_trig_shift=1000)", "def test_compute_Sv_ek80_BB_complex(ek80_path):\n ek80_raw_path = str(\n ek80_path.joinpath('ar2.0-D20201209-T235955.raw')\n ) # CW complex\n echodata = ep.open_raw(ek80_raw_path, sonar_model='EK80')\n ds_Sv = ep.calibrate.compute_Sv(\n echodata, waveform_mode='BB', encode_mode='complex'\n )\n assert isinstance(ds_Sv, xr.Dataset) is True\n ds_TS = ep.calibrate.compute_TS(\n echodata, waveform_mode='BB', encode_mode='complex'\n )\n assert isinstance(ds_TS, xr.Dataset) is True", "def test_defect_calculation_control():\n csnet = example.control.cs_network()\n slaves, connections = csnet\n step_sizes = {name: Fraction(1, 2) for name in slaves}\n make_zoh: cs.ConverterConstructor = cs.Zoh\n rate_converters = {cs.Connection(src, dst): make_zoh for dst, src in connections.items()}\n initial_tokens = {sdf.Dst('PI', 'u'): [0.], sdf.Dst('PT2', 'u'): [0.]}\n cosim = csnet, step_sizes, rate_converters, initial_tokens\n defect = cs.evaluate(cosim, Fraction(20.))\n for val in defect.connection.values():\n assert val < float('inf')\n for val in defect.output.values():\n assert val < float('inf')", "def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! 
array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()", "def read_satellite(filename, ftype):\n #ftype = 'l3c'\n #filename = '/gws/nopw/j04/cds_c3s_sst/output/v2.6.0/l3c/AVHRR19_G/2018/03/01/20180301120000-C3S-L3C_GHRSST-SSTskin-AVHRR19_G-ICDR2.0_day-v02.0-fv01.0.nc'\n #ftype = 'l4'\n #filename = '/gws/nopw/j04/cds_c3s_sst/public/data/ICDR_v2/Analysis/L4/v2.0/2018/01/01/20180101120000-C3S-L4_GHRSST-SSTdepth-OSTIA-GLOB_ICDR2.0-v02.0-fv01.0.nc'\n print \"Reading %s file: %s\" % (ftype, filename)\n \n # Read data - L4 or L3C (note L4 mask and L3C quality level have same array name)\n ncin = netCDF4.Dataset(filename)\n if ftype == 'l4':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n sst = ncin.variables['analysed_sst'][:]\n unc = ncin.variables['analysis_uncertainty'][:]\n sea_ice_frac = ncin.variables['sea_ice_fraction'][:]\n ql = ncin.variables['mask'][:]\n sstfill = ncin.variables['analysed_sst']._FillValue\n sstao = ncin.variables['analysed_sst'].add_offset\n sstsf = ncin.variables['analysed_sst'].scale_factor\n elif ftype == 'l3c':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n time_bnds = ncin.variables['time_bnds'][:]\n sst = ncin.variables['sea_surface_temperature'][:]\n sst_depth = ncin.variables['sea_surface_temperature_depth'][:]\n sst_dtime = ncin.variables['sst_dtime'][:]\n sst_depth_dtime = ncin.variables['sst_depth_dtime'][:]\n sses_bias = ncin.variables['sses_bias'][:]\n sses_sd = ncin.variables['sses_standard_deviation'][:]\n sst_depth_total_unc = ncin.variables['sst_depth_total_uncertainty'][:]\n l2p_flags = ncin.variables['l2p_flags'][:]\n ql = ncin.variables['quality_level'][:]\n wind_speed = ncin.variables['wind_speed'][:]\n large_scale_cor_unc = ncin.variables['large_scale_correlated_uncertainty'][:]\n synop_cor_unc = ncin.variables['synoptically_correlated_uncertainty'][:]\n uncor_unc = ncin.variables['uncorrelated_uncertainty'][:]\n adj_unc = ncin.variables['adjustment_uncertainty'][:]\n aerosol_dyn_ind = ncin.variables['aerosol_dynamic_indicator'][:]\n sens = ncin.variables['sensitivity'][:]\n tfill = ncin.variables['sst_dtime']._FillValue\n sstfill = ncin.variables['sea_surface_temperature']._FillValue\n sstao = ncin.variables['sea_surface_temperature'].add_offset\n sstsf = ncin.variables['sea_surface_temperature'].scale_factor\n else:\n print 'ftype not recognised or supported'\n \n # Create time field\n # -> If L4 then create a time field set to time in L4 file\n # -> Also add a time fill value to keep coding simple later on\n if ftype == 'l4':\n time = np.empty((7200,3600))\n time[:,:] = time_read\n tfill = -2147483648\n else:\n time = copy.deepcopy(sst_dtime) # Need to 
make a hard copy\n mask = sst_dtime.mask == False; mask = mask[0,:,:]\n row, col = np.where(mask==True)\n time.data[0, row, col] = time.data[0,row, col] + time_read\n \n # Create output structure\n if ftype == 'l4':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n sst=sst,\n unc=unc,\n sea_ice_frac=sea_ice_frac,\n ql=ql,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n elif ftype == 'l3c':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n time_bnds=time_bnds,\n sst=sst,\n sst_depth=sst_depth,\n sst_dtime=sst_dtime,\n sst_depth_dtime=sst_depth_dtime,\n sses_bias=sses_bias,\n sses_sd=sses_sd,\n sst_depth_total_unc=sst_depth_total_unc,\n l2p_flags=l2p_flags,\n ql=ql,\n wind_speed=wind_speed,\n large_scale_cor_unc=large_scale_cor_unc,\n synop_cor_unc=synop_cor_unc,\n uncor_unc=uncor_unc,\n adj_unc=adj_unc,\n aerosol_dyn_ind=aerosol_dyn_ind,\n sens=sens,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n else:\n print 'ftype not recognised or supported'\n \n return data", "def read_skyh5(\n self, filename, run_check=True, check_extra=True, run_check_acceptability=True\n ):\n with h5py.File(filename, \"r\") as fileobj:\n if \"/Header\" not in fileobj:\n raise ValueError(\"This is not a proper skyh5 file.\")\n\n init_params = {\"filename\": os.path.basename(filename)}\n\n with h5py.File(filename, \"r\") as fileobj:\n # extract header information\n header = fileobj[\"/Header\"]\n header_params = [\n \"_Ncomponents\",\n \"_Nfreqs\",\n \"_component_type\",\n \"_spectral_type\",\n \"_history\",\n \"_name\",\n \"_nside\",\n \"_hpx_order\",\n \"_hpx_inds\",\n \"_freq_array\",\n \"_freq_edge_array\",\n \"_reference_frequency\",\n \"_spectral_index\",\n \"_stokes_error\",\n \"_beam_amp\",\n \"_extended_model_group\",\n ]\n\n optional_params = [\n \"_hpx_order\",\n \"_freq_array\",\n \"_freq_edge_array\",\n \"_reference_frequency\",\n \"_spectral_index\",\n \"_stokes_error\",\n \"_beam_amp\",\n \"_extended_model_group\",\n ]\n\n self.component_type = header[\"component_type\"][()].tobytes().decode(\"utf-8\")\n\n if self.component_type != \"healpix\":\n optional_params.extend([\"_nside\", \"_hpx_inds\"])\n if \"skycoord\" in header:\n skycoord_dict = {}\n for key in header[\"skycoord\"]:\n if key in [\"frame\", \"representation_type\"]:\n expected_type = str\n else:\n expected_type = None\n skycoord_dict[key] = _get_value_hdf5_group(\n header[\"skycoord\"], key, expected_type\n )\n init_params[\"skycoord\"] = SkyCoord(**skycoord_dict)\n else:\n if \"lat\" in header and \"lon\" in header and \"frame\" in header:\n header_params += [\"lat\", \"lon\", \"frame\"]\n optional_params += [\"lat\", \"lon\", \"frame\"]\n elif \"ra\" in header and \"dec\" in header:\n header_params += [\"ra\", \"dec\"]\n optional_params += [\"ra\", \"dec\"]\n else:\n raise ValueError(\n \"No component location information found in file.\"\n )\n warnings.warn(\n \"Parameter skycoord not found in skyh5 file. \"\n \"This skyh5 file was written by an older version of pyradiosky. 
\"\n \"Consider re-writing this file to ensure future compatibility\"\n )\n else:\n optional_params.append(\"_name\")\n\n if \"hpx_frame\" in header:\n if isinstance(header[\"hpx_frame\"], h5py.Dataset):\n # hpx_frame was stored as a string\n frame_str = _get_value_hdf5_group(header, \"hpx_frame\", str)\n dummy_coord = SkyCoord(0, 0, unit=\"rad\", frame=frame_str)\n init_params[\n \"hpx_frame\"\n ] = dummy_coord.frame.replicate_without_data(copy=True)\n else:\n # hpx_frame was stored as a nested dset\n skycoord_dict = {}\n for key in header[\"hpx_frame\"]:\n if key in [\"frame\", \"representation_type\"]:\n expected_type = str\n else:\n expected_type = None\n skycoord_dict[key] = _get_value_hdf5_group(\n header[\"hpx_frame\"], key, expected_type\n )\n dummy_coord = SkyCoord(0, 0, unit=\"rad\", **skycoord_dict)\n init_params[\n \"hpx_frame\"\n ] = dummy_coord.frame.replicate_without_data(copy=True)\n elif \"frame\" in header:\n # frame was stored as a string\n frame_str = _get_value_hdf5_group(header, \"frame\", str)\n dummy_coord = SkyCoord(0, 0, unit=\"rad\", frame=frame_str)\n init_params[\"hpx_frame\"] = dummy_coord.frame.replicate_without_data(\n copy=True\n )\n\n for par in header_params:\n if par in [\"lat\", \"lon\", \"frame\", \"ra\", \"dec\"]:\n parname = par\n if par == \"frame\":\n expected_type = \"str\"\n else:\n expected_type = Quantity\n else:\n param = getattr(self, par)\n parname = param.name\n expected_type = param.expected_type\n\n # skip optional params if not present\n if par in optional_params:\n if parname not in header:\n continue\n\n if parname not in header:\n raise ValueError(\n f\"Expected parameter {parname} is missing in file.\"\n )\n\n value = _get_value_hdf5_group(header, parname, expected_type)\n\n if parname == \"nside\":\n value = int(value)\n\n init_params[parname] = value\n\n # check that the parameters not passed to the init make sense\n if init_params[\"component_type\"] == \"healpix\":\n if init_params[\"Ncomponents\"] != init_params[\"hpx_inds\"].size:\n raise ValueError(\n \"Ncomponents is not equal to the size of 'hpx_inds'.\"\n )\n else:\n if init_params[\"Ncomponents\"] != init_params[\"name\"].size:\n raise ValueError(\"Ncomponents is not equal to the size of 'name'.\")\n\n if \"freq_array\" in init_params.keys():\n if init_params[\"Nfreqs\"] != init_params[\"freq_array\"].size:\n raise ValueError(\"Nfreqs is not equal to the size of 'freq_array'.\")\n\n if init_params[\"spectral_type\"] == \"subband\":\n if \"freq_edge_array\" not in init_params.keys():\n try:\n init_params[\n \"freq_edge_array\"\n ] = _get_freq_edges_from_centers(\n init_params[\"freq_array\"], self._freq_array.tols\n )\n except ValueError:\n warnings.warn(\n \"No freq_edge_array in this file and frequencies are \"\n \"not evenly spaced, so spectral_type will be set to \"\n \"'full' rather than 'subband'.\"\n )\n init_params[\"spectral_type\"] = \"full\"\n\n # remove parameters not needed in __init__\n init_params.pop(\"Ncomponents\")\n init_params.pop(\"Nfreqs\")\n\n # get stokes array\n dgrp = fileobj[\"/Data\"]\n init_params[\"stokes\"] = dgrp[\"stokes\"] * units.Unit(\n dgrp[\"stokes\"].attrs[\"unit\"]\n )\n # frame is a new parameter, check if it exists and try to read\n # otherwise default to ICRS (the old assumed frame.)\n if \"skycoord\" not in init_params and self.component_type != \"healpix\":\n if \"frame\" in header:\n init_params[\"frame\"] = header[\"frame\"][()].tobytes().decode(\"utf8\")\n else:\n warnings.warn(\n \"No frame available in this file, assuming 
'icrs'. \"\n \"Consider re-writing this file to ensure future compatility.\"\n )\n init_params[\"frame\"] = \"icrs\"\n\n if self.component_type == \"healpix\" and \"hpx_frame\" in init_params.keys():\n init_params[\"frame\"] = init_params[\"hpx_frame\"]\n del init_params[\"hpx_frame\"]\n\n if self.component_type == \"healpix\" and \"frame\" not in init_params:\n warnings.warn(\n \"No frame available in this file, assuming 'icrs'. \"\n \"Consider re-writing this file to ensure future compatility.\"\n )\n init_params[\"frame\"] = \"icrs\"\n\n self.__init__(**init_params)\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )", "def run_main_test():\r\n\r\n print(\"\"\"\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n +++ Performing Main LZJD Full File Test +++\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n \"\"\")\r\n # iterate over the files in the directory\r\n for f in listdir(SRC):\r\n if isfile(join(SRC, f)):\r\n # prepare a dictionary with the digests ready to compare\r\n DIGESTS[f] = {'src': None, 'r2': None, 'ghidra': None}\r\n\r\n # calculate digest of src file\r\n DIGESTS[f]['src'] = digest(join(SRC, f))\r\n\r\n # name adjustment\r\n f2 = f.replace(\".c\", \".o\")\r\n\r\n # calculate digest of ghidra and r2 outputs\r\n DIGESTS[f]['ghidra'] = digest(join(GHIDRA_PATH, GHIDRA_NAME.format(f2)))\r\n DIGESTS[f]['r2'] = digest(join(R2DEC_PATH, R2DEC_NAME.format(f2)))\r\n\r\n # obtain the similarity from source\r\n SCORES[f] = {'ghidra': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['ghidra']),\r\n 'r2': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['r2']),\r\n 'x': get_lzjd_sim(DIGESTS[f]['ghidra'], DIGESTS[f]['r2'])}\r\n\r\n gidra_doms = 0\r\n for f in SCORES:\r\n print(\"{0:12}: Scores G:{1:20} R2:{2:20} X:{3:20} D:{4:20}\".format(f,\r\n SCORES[f]['ghidra'],\r\n SCORES[f]['r2'],\r\n SCORES[f]['x'],\r\n SCORES[f]['ghidra'] - SCORES[f]['r2']))\r\n if SCORES[f]['ghidra'] > SCORES[f]['r2']:\r\n gidra_doms += 1\r\n print(\"Ghidra Dominated on {} files\".format(gidra_doms))\r\n # This section of code prepares visualizations on the data for easy analysis\r\n plot_scatter(SCORES, title=\"LZJD Full File scores\")\r\n\r\n # obtian the scores as input data to the plots\r\n bxplt_data_gd = [score['ghidra'] for score in SCORES.values()]\r\n bxplt_data_r2 = [score['r2'] for score in SCORES.values()]\r\n\r\n # run pairwise t test\r\n print(\"Performing T-Test on LZJD Distance of files\")\r\n run_ttest(bxplt_data_gd, bxplt_data_r2)", "def time_calibration(input_file):\n original_path = os.getcwd()\n save_path = input_file['save_path']\n #change to save data reduction directory\n os.chdir(save_path)\n print '\\n Reading the list of images ....\\n'\n planet = input_file['exoplanet'] #set exoplanet name\n images = sorted(glob.glob('AB'+planet+'*.fits'))\n print images\n #include de RA,DEC and epoch of the exoplanet\n RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']\n #obtain ST JD using iraf task and introduce in the header\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):\n new_date = use.yesterday(hdr['date-obs'])\n #print images[i], new_date\n else:\n new_date = hdr['date-obs']\n year,month,day = split(new_date,'-')\n iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])\n JD = iraf.asttimes.jd #obtain julian date\n LMST = iraf.asttimes.lmst #obtain the sideral time\n LMST = use.sexagesimal_format(LMST) #convert 
sideral time in sexagesimal format\n iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header\n iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sideral time in the header\n iraf.ccdhedit(images[i],'JD',JD,type='string') #include de julian date in the header\n #include RA, and DEC of the object in your header\n iraf.ccdhedit(images[i],\"RA\",RA,type=\"string\") #include right ascention in the header\n iraf.ccdhedit(images[i],\"DEC\",DEC,type=\"string\") #include declination in the header\n iraf.ccdhedit(images[i],\"epoch\",epoch,type=\"string\") #include epoch in the header\n # use.update_progress((i+1.)/len(images))\n print '\\n Setting airmass ....\\n'\n for i in range(len(images)):\n print '# ',images[i]\n #iraf.hedit(images[i],'airmass',airmass,add='yes')\n #iraf.hedit(images[i],'HJD',HJD,add='yes')\n iraf.setairmass.observatory = input_file['observatory']\n iraf.setairmass(images[i])\n iraf.setjd.time = 'ut'\n iraf.setjd(images[i])\n print '\\n.... done.\\n'\n #export information\n hjd, jd, airmass, st = [],[],[],[]\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n hjd.append(hdr['HJD'])\n jd.append(hdr['JD'])\n airmass.append(hdr['airmass'])\n st.append(hdr['st'])\n #saving the data\n data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T\n data.columns = ['HJD','JD','ST','Airmass']\n data.to_csv('results_iraf_calibrations.csv')\n #change to workings directory\n os.chdir(original_path)\n return", "def compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs(self):\n ref_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs\"\n \"/lake_analysis_one_21_Jun_2021\")\n data_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"\n \"lake_analysis_two_26_Mar_2022\")\n ref_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_flowtocell.nc\")\n data_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0\",\n \"30min_flowtocell.nc\")\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_extract_ls_mask_from_corrected_HD_rdirs_20160504_142435.nc\")\n ref_catchment_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_catchments.nc\")\n data_catchment_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0\",\n \"30min_catchments.nc\")\n ref_rdirs_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_rdirs.nc\")\n reference_rmouth_outflows_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_flowtorivermouths.nc\")\n data_rmouth_outflows_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0\",\n \"30min_flowtorivermouths.nc\")\n #glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=80,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n 
lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='extensive',\n catchment_and_outflows_mods_list_filename=\\\n None,\n #\"catch_and_outflow_mods_ice6g_vs_ice5g_lgm.txt\",\n #additional_matches_list_filename=\\\n #\"additional_matches_ice6g_vs_ice5g_lgm.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='HD')", "def optimize_dcr(dg):\n # files to consider. fixme: right now only works with one file\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n \n tb_raw = 'ORSIS3302DecoderForEnergy/raw/'\n tb_data = sto.read_object(tb_raw, f_raw)\n \n cycle = dg.fileDB['cycle'].values[0]\n f_results = f'./temp_{cycle}.h5'\n \n write_output = True\n \n # adjust dsp config \n with open('opt_dcr.json') as f:\n dsp_config = json.load(f, object_pairs_hook=OrderedDict)\n # pprint(dsp_config)\n # exit()\n \n # set dcr parameters\n # rise, flat, dcr_tstart = 200, 1000, 'tp_0+1.5*us' # default\n # dcr_rise, dcr_flat, dcr_tstart = 100, 3000, 'tp_0+3*us' # best so far?\n dcr_rise, dcr_flat, dcr_tstart = 100, 2500, 'tp_0+1*us'\n dsp_config['processors']['dcr_raw']['args'][1] = dcr_rise\n dsp_config['processors']['dcr_raw']['args'][2] = dcr_flat\n dsp_config['processors']['dcr_raw']['args'][3] = dcr_tstart\n \n # set trap energy parameters\n # ene_rise, ene_flat = \"2*us\", \"1*us\" # best? from optimize_trap\n ene_rise, ene_flat = \"10*us\", \"5*us\"\n dsp_config['processors']['wf_trap']['args'][1] = ene_rise\n dsp_config['processors']['wf_trap']['args'][2] = ene_flat\n \n # adjust pole-zero constant\n dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '64.4*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '50*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '100*us'\n \n # run dsp\n print('Running DSP ...')\n t_start = time.time()\n pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=1)\n pc.execute()\n t_elap = (time.time() - t_start)/60\n print(f'Done. 
Elapsed: {t_elap:.2f} min')\n \n df_out = tb_out.get_dataframe()\n \n if write_output:\n df_out.to_hdf(f_results, key='opt_dcr')\n print('Wrote output file:', f_results)", "def vorticity(tsr,solidity):\n \n # Reading in csv file (vorticity database)\n basepath = path.join(path.dirname(path.realpath(__file__)),'data')\n fdata = basepath + path.sep + 'vortdatabase.csv'\n f = open(fdata)\n csv_f = csv.reader(f)\n \n i = 0\n sol_d = np.array([])\n for row in csv_f:\n if i == 0:\n raw = row\n raw = np.delete(raw,0)\n vortdat = raw\n tsr_d = raw # range of tip-speed ratios included\n if row[0] == 'solidity':\n sol_d = np.append(sol_d,float(row[1])) # range of solidities included\n elif row[0] != 'TSR' and row[0] != 'solidity':\n raw = row\n raw = np.delete(raw,0)\n vortdat = np.vstack([vortdat,raw]) # adding entry to vorticity database array\n i += 1\n f.close()\n \n vortdat = np.delete(vortdat,(0),axis=0) # eliminating first row used as a placeholder\n tsr_d = tsr_d.astype(np.float) # converting tip-speed ratio entries into floats\n vortdat = vortdat.astype(np.float) # converting vorticity database entries into floats\n \n # Creating arrays for each EMG parameter\n for i in range(np.size(sol_d)):\n sol = str(i+1)\n \n exec('s'+sol+'_loc1 = vortdat[i*10]\\ns'+sol+'_loc2 = vortdat[i*10+1]\\ns'+sol+'_loc3 = vortdat[i*10+2]\\ns'+sol+'_spr1 = vortdat[i*10+3]\\ns'+sol+'_spr2 = vortdat[i*10+4]\\ns'+sol+'_skw1 = vortdat[i*10+5]\\ns'+sol+'_skw2 = vortdat[i*10+6]\\ns'+sol+'_scl1 = vortdat[i*10+7]\\ns'+sol+'_scl2 = vortdat[i*10+8]\\ns'+sol+'_scl3 = vortdat[i*10+9]\\n')\n \n # BIVARIATE SPLINE FITTING\n \n iz = np.size(sol_d)\n jz = np.size(tsr_d)\n \n # Initializing rectangular matrices\n Z_loc1 = np.zeros((iz,jz))\n Z_loc2 = np.zeros((iz,jz))\n Z_loc3 = np.zeros((iz,jz))\n Z_spr1 = np.zeros((iz,jz))\n Z_spr2 = np.zeros((iz,jz))\n Z_skw1 = np.zeros((iz,jz))\n Z_skw2 = np.zeros((iz,jz))\n Z_scl1 = np.zeros((iz,jz))\n Z_scl2 = np.zeros((iz,jz))\n Z_scl3 = np.zeros((iz,jz))\n \n # Transferring raw data into rectangular matrices\n for i in range(iz):\n for j in range(jz):\n sol = str(i+1)\n exec('Z_loc1[i,j] = s'+sol+'_loc1[j]')\n exec('Z_loc2[i,j] = s'+sol+'_loc2[j]')\n exec('Z_loc3[i,j] = s'+sol+'_loc3[j]')\n exec('Z_spr1[i,j] = s'+sol+'_spr1[j]')\n exec('Z_spr2[i,j] = s'+sol+'_spr2[j]')\n exec('Z_skw1[i,j] = s'+sol+'_skw1[j]')\n exec('Z_skw2[i,j] = s'+sol+'_skw2[j]')\n exec('Z_scl1[i,j] = s'+sol+'_scl1[j]')\n exec('Z_scl2[i,j] = s'+sol+'_scl2[j]')\n exec('Z_scl3[i,j] = s'+sol+'_scl3[j]')\n \n # Creating a rectangular bivariate spline of the parameter data\n s_loc1 = RectBivariateSpline(sol_d,tsr_d,Z_loc1)\n s_loc2 = RectBivariateSpline(sol_d,tsr_d,Z_loc2)\n s_loc3 = RectBivariateSpline(sol_d,tsr_d,Z_loc3)\n s_spr1 = RectBivariateSpline(sol_d,tsr_d,Z_spr1)\n s_spr2 = RectBivariateSpline(sol_d,tsr_d,Z_spr2)\n s_skw1 = RectBivariateSpline(sol_d,tsr_d,Z_skw1)\n s_skw2 = RectBivariateSpline(sol_d,tsr_d,Z_skw2)\n s_scl1 = RectBivariateSpline(sol_d,tsr_d,Z_scl1)\n s_scl2 = RectBivariateSpline(sol_d,tsr_d,Z_scl2)\n s_scl3 = RectBivariateSpline(sol_d,tsr_d,Z_scl3)\n \n # Selecting the specific parameters to use for TSR and solidity\n loc1 = s_loc1(solidity,tsr)\n loc2 = s_loc2(solidity,tsr)\n loc3 = s_loc3(solidity,tsr)\n spr1 = s_spr1(solidity,tsr)\n spr2 = s_spr2(solidity,tsr)\n skw1 = s_skw1(solidity,tsr)\n skw2 = s_skw2(solidity,tsr)\n scl1 = s_scl1(solidity,tsr)\n scl2 = s_scl2(solidity,tsr)\n scl3 = s_scl3(solidity,tsr)\n \n # Creating arrays of the parameters\n loc = 
np.array([loc1[0,0],loc2[0,0],loc3[0,0]])\n spr = np.array([spr1[0,0],spr2[0,0]])\n skw = np.array([skw1[0,0],skw2[0,0]])\n scl = np.array([scl1[0,0],scl2[0,0],scl3[0,0]])\n \n return loc,spr,skw,scl", "def main() -> int:\n ucvm_out = \"\"\n for j in frange(CORNERS[\"bl\"][\"n\"], CORNERS[\"ur\"][\"n\"], SPACING):\n for i in frange(CORNERS[\"bl\"][\"e\"], CORNERS[\"ur\"][\"e\"] + SPACING, SPACING):\n ucvm_out += \"%.2f %.2f 0\\n\" % (i, j)\n os.chdir(\"/Users/davidgil/ucvm-15.10.0/bin\")\n proc = Popen(\n [\"./ucvm_query\", \"-f\", \"../conf/ucvm.conf\"], stdout=PIPE, stdin=PIPE, stderr=STDOUT\n )\n out_arr = np.zeros(\n shape=(\n int((CORNERS[\"ur\"][\"n\"] - CORNERS[\"bl\"][\"n\"]) / SPACING) + 2,\n int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING) + 2\n )\n )\n output = proc.communicate(input=ucvm_out.encode(\"ASCII\"))[0]\n i = 0\n j = 0\n for line in output.decode(\"ASCII\").split(\"\\n\")[2:-1]:\n line_split = line.split()\n try:\n out_arr[j][i] = float(line_split[4])\n except IndexError:\n print(line_split)\n if i == int((CORNERS[\"ur\"][\"e\"] - CORNERS[\"bl\"][\"e\"]) / SPACING):\n i = 0\n j += 1\n else:\n i += 1\n np.save(\"vs30.dat\", out_arr)\n return 0", "def run(config, tim=None):\n import common_lib\n import dr_lib\n import DST\n \n if tim is not None:\n tim.getTime(False)\n old_time = tim.getOldTime()\n\n if config.data is None:\n raise RuntimeError(\"Need to pass a data filename to the driver \"\\\n +\"script.\")\n\n # Read in geometry if one is provided\n if config.inst_geom is not None:\n if config.verbose:\n print \"Reading in instrument geometry file\"\n \n inst_geom_dst = DST.getInstance(\"application/x-NxsGeom\",\n config.inst_geom)\n else:\n inst_geom_dst = None\n\n config.so_axis = \"time_of_flight\"\n\n # Steps 1-3: Produce a scaled summed dark current dataset\n dc_som = dr_lib.scaled_summed_data(config.dkcur, config,\n dataset_type=\"dark_current\",\n timer=tim)\n\n # Perform Steps 3-6 on black can data\n if config.bcan is not None:\n b_som1 = dr_lib.calibrate_dgs_data(config.bcan, config, dc_som,\n dataset_type=\"black_can\",\n inst_geom_dst=inst_geom_dst,\n tib_const=config.tib_const,\n cwp=config.cwp_bcan,\n timer=tim)\n else:\n b_som1 = None\n\n # Perform Steps 3-6 on empty can data \n if config.ecan is not None:\n e_som1 = dr_lib.calibrate_dgs_data(config.ecan, config, dc_som,\n dataset_type=\"empty_can\",\n inst_geom_dst=inst_geom_dst,\n tib_const=config.tib_const,\n cwp=config.cwp_ecan,\n timer=tim)\n else:\n e_som1 = None\n\n # Perform Steps 3-6 on normalization data\n n_som1 = dr_lib.calibrate_dgs_data(config.data, config, dc_som,\n dataset_type=\"normalization\",\n inst_geom_dst=inst_geom_dst,\n tib_const=config.tib_const,\n cwp=config.cwp_data,\n timer=tim)\n\n # Perform Steps 7-16 on normalization data\n if config.norm_trans_coeff is None:\n norm_trans_coeff = None\n else:\n norm_trans_coeff = config.norm_trans_coeff.toValErrTuple()\n\n # Determine if we need to rebin the empty or black can data\n if config.ecan is not None and e_som1 is not None:\n ecan_cwp = True\n else:\n ecan_cwp = False\n\n if config.bcan is not None and b_som1 is not None:\n bcan_cwp = True\n else:\n bcan_cwp = False \n\n cwp_used = ecan_cwp or bcan_cwp\n\n n_som2 = dr_lib.process_dgs_data(n_som1, config, b_som1, e_som1,\n norm_trans_coeff,\n dataset_type=\"normalization\",\n cwp_used=cwp_used,\n timer=tim)\n \n del n_som1, b_som1, e_som1\n\n # Step 17: Integrate normalization spectra\n if config.verbose:\n print \"Integrating normalization spectra\"\n\n if tim is 
not None:\n tim.getTime(False)\n\n if config.norm_int_range is None:\n start_val = float(\"inf\")\n end_val = float(\"inf\")\n else:\n if not config.wb_norm:\n # Translate energy transfer to final energy\n ef_start = config.initial_energy.getValue() - \\\n config.norm_int_range[0]\n ef_end = config.initial_energy.getValue() - \\\n config.norm_int_range[1]\n # Convert final energy to final wavelength\n start_val = common_lib.energy_to_wavelength((ef_start, 0.0))[0]\n end_val = common_lib.energy_to_wavelength((ef_end, 0.0))[0]\n else:\n start_val = config.norm_int_range[0]\n end_val = config.norm_int_range[1]\n \n n_som3 = dr_lib.integrate_spectra(n_som2, start=start_val,\n end=end_val, width=True)\n\n del n_som2\n \n if tim is not None:\n tim.getTime(msg=\"After integrating normalization spectra \")\n\n file_comment = \"Normalization Integration range: %0.3fA, %0.3fA\" \\\n % (start_val, end_val)\n \n hlr_utils.write_file(config.output, \"text/num-info\", n_som3,\n output_ext=\"norm\",\n data_ext=config.ext_replacement,\n path_replacement=config.path_replacement,\n verbose=config.verbose,\n message=\"normalization values\",\n comments=[file_comment],\n tag=\"Integral\", units=\"counts\") \n \n if tim is not None:\n tim.getTime(False)\n\n if config.verbose:\n print \"Making mask file\"\n\n # Make mask file from threshold\n dr_lib.filter_normalization(n_som3, config.lo_threshold,\n config.hi_threshold, config)\n\n if tim is not None:\n tim.getTime(msg=\"After making mask file \")\n\n # Write out RMD file\n n_som3.attr_list[\"config\"] = config\n\n hlr_utils.write_file(config.output, \"text/rmd\", n_som3,\n output_ext=\"rmd\",\n data_ext=config.ext_replacement, \n path_replacement=config.path_replacement,\n verbose=config.verbose,\n message=\"metadata\")\n \n if tim is not None:\n tim.setOldTime(old_time)\n tim.getTime(msg=\"Total Running Time\")", "def test_fill_data(self):\n self.full_iv.get_data()\n df_iv = self.full_iv.calc_iv()\n\n self.assertTrue(len(df_iv))\n\n db = pd.HDFStore('test.h5')\n db['iv'] = df_iv\n db.close()", "def test_compute_Sv_ek80_CW_power_BB_complex(ek80_path):\n ek80_raw_path = ek80_path / \"Summer2018--D20180905-T033113.raw\"\n ed = ep.open_raw(ek80_raw_path, sonar_model=\"EK80\")\n ds_Sv = ep.calibrate.compute_Sv(\n ed, waveform_mode=\"CW\", encode_mode=\"power\"\n )\n assert isinstance(ds_Sv, xr.Dataset)\n ds_Sv = ep.calibrate.compute_Sv(\n ed, waveform_mode=\"BB\", encode_mode=\"complex\"\n )\n assert isinstance(ds_Sv, xr.Dataset)", "def compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min(self):\n ref_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"\n \"lake_analysis_one_21_Jun_2021/rivers/results/\"\n \"default_orog_corrs/\"\n \"diag_version_29_date_0_original_truesinks\")\n data_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"\n \"lake_analysis_two_26_Mar_2022/\"\n \"rivers/results/diag_version_32_date_0_with_truesinks\")\n ref_filename=os.path.join(ref_base_dir,\"10min_flowtocell.nc\")\n data_filename=os.path.join(data_base_dir,\"10min_flowtocell.nc\")\n #lsmask_filename=os.path.join(self.plots_data_dir,\n # \"ls_mask_extract_ls_mask_from_corrected_HD_rdirs_20160504_142435.nc\")\n lsmask_filename=None\n ref_catchment_filename=os.path.join(ref_base_dir,\n \"10min_catchments_ext.nc\")\n data_catchment_filename=os.path.join(data_base_dir,\n \"10min_catchments_ext.nc\")\n ref_rdirs_filename=os.path.join(ref_base_dir,\n \"10min_rdirs.nc\")\n 
data_rdirs_filename=os.path.join(data_base_dir,\n \"10min_rdirs.nc\")\n reference_rmouth_outflows_filename=os.path.join(ref_base_dir,\n \"10min_rmouth_flowtocell.nc\")\n data_rmouth_outflows_filename=os.path.join(data_base_dir,\n \"10min_rmouth_flowtocell.nc\")\n #glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=80*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=20*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n difference_in_catchment_label=\"Difference\",\n 
grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=\\\n data_rdirs_filename,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=20*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n show_true_sinks=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=\\\n None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=5*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n show_true_sinks=False,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n 
ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=\\\n data_rdirs_filename,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=5*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n show_true_sinks=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=\\\n data_rdirs_filename,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=2*9/3,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='minimal',\n additional_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/additional_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day_ext.txt\",\n additional_truesink_matches_list_filename=\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"addmatches/addmatches_truesinks/\"\n \"additional_truesinks_matches_10min_upscaled_\"\n \"MERIT_rdirs_vs_modern_day.txt\",\n catchment_and_outflows_mods_list_filename=\\\n \"/Users/thomasriddick/Documents/data/HDdata/\"\n \"catchmods/catch_and_outflow_mods_10min_upscaled_MERIT_rdirs_vs_modern_day.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=False,\n show_true_sinks=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='LatLong10min')", "def test_calc_SR5():\n tb = TradeBot(\n pair='GBP_JPY',\n timeframe='D',\n start='2018-12-20 22:00:00',\n end='2019-01-17 22:00:00',\n settingf=\"../../data/settings.ini\"\n )\n\n harealst = tb.calc_SR(datetime.datetime(2019, 1, 7, 22, 0))\n\n assert len(harealst.halist) == 6\n assert 0", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 
0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def test_data_handling_nc_cc():\n\n input_files = [os.path.join(TEST_DATA_DIR, 'ua_test.nc'),\n os.path.join(TEST_DATA_DIR, 'va_test.nc'),\n os.path.join(TEST_DATA_DIR, 'orog_test.nc'),\n os.path.join(TEST_DATA_DIR, 'zg_test.nc')]\n\n with xr.open_mfdataset(input_files) as fh:\n min_lat = np.min(fh.lat.values)\n min_lon = np.min(fh.lon.values)\n target = (min_lat, min_lon)\n plevel = fh.plev[-1]\n ua = np.transpose(fh['ua'][:, -1, ...].values, (1, 2, 0))\n va = np.transpose(fh['va'][:, -1, ...].values, (1, 2, 0))\n\n handler = DataHandlerNCforCC(input_files, features=['U_100m', 'V_100m'],\n target=target, shape=(20, 20),\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n\n assert handler.data.shape == (20, 20, 20, 2)\n\n handler = DataHandlerNCforCC(input_files,\n features=[f'U_{int(plevel)}pa',\n f'V_{int(plevel)}pa'],\n target=target, shape=(20, 20),\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n if handler.invert_lat:\n handler.data = handler.data[::-1]\n assert handler.data.shape == (20, 20, 20, 2)\n assert np.allclose(ua, handler.data[..., 0])\n assert np.allclose(va, handler.data[..., 1])", "def test_cambridge_rent_price_per_sqft():\n dataframe = get_final_zillow_dataframe()\n cambridge = get_city_state_row(dataframe, 'cambridge', 'massachusetts')\n assert round(cambridge.iloc[0].get('ZRIFAH'), 1) == 2.9", "def test_3d_steam_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/full3D.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == 
-7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n write_readback(dic,data)", "def test_CCI_SM_v033_025Ds_img_reading():\n parameter = ['sm']\n data_path = os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"combined\")\n img_c = CCI_SM_025Ds(data_path=data_path, parameter=parameter,\n subgrid=None, array_1D=False)\n\n image_c = img_c.read(datetime(2016, 1, 1, 0))\n\n assert sorted(image_c.data.keys()) == sorted(parameter)\n assert image_c.timestamp == datetime(2016, 1, 1, 0)\n nptest.assert_almost_equal(image_c.data['sm'][273, 693], 0.142998, 5)\n assert image_c.lon.shape == image_c.lat.shape == (720, 1440)\n\n\n data_path = os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"active\")\n img_a = CCI_SM_025Ds(data_path=data_path, parameter=parameter, subgrid=None,\n array_1D=False)\n image_a = img_a.read(datetime(2016, 1, 1, 0))\n\n assert sorted(image_a.data.keys()) == sorted(parameter)\n assert image_a.timestamp == datetime(2016, 1, 1, 0)\n nptest.assert_almost_equal(image_a.data['sm'][273, 693], 18.92771, 5)\n assert image_a.lon.shape == image_a.lat.shape == (720, 1440)\n\n\n data_path = os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"passive\")\n img_p = CCI_SM_025Ds(data_path=data_path, parameter=parameter, subgrid=None,\n array_1D=False)\n\n image_p = img_p.read(datetime(2016, 1, 1, 0))\n\n assert sorted(image_p.data.keys()) == sorted(parameter)\n assert image_p.timestamp == datetime(2016, 1, 1, 0)\n nptest.assert_almost_equal(image_p.data['sm'][273, 693], 0.0700, 5)\n\n assert image_p.lon.shape == image_p.lat.shape == (720, 1440)", "def compute_contrast_sphere_offline(path_raw,path_reduced,path_output,debug=True):\r\n try:\r\n if not type(path_raw) is PosixPath:\r\n path_raw = Path(path_raw)\r\n if type(path_reduced) is PosixPath:\r\n path_reduced = Path(path_reduced)\r\n if not type(path_raw) is PosixPath:\r\n path_output = Path(path_output) \r\n for path in [path_raw,path_reduced]:\r\n if not path.exists():\r\n print('The input path {0:s} does not exists. Returning'.format(str(path)))\r\n return\r\n if not path_output.exists():\r\n path_output.mkdir()\r\n except:\r\n print(sys.exc_info()[0])\r\n \r\n \r\n #%% Step 0 : \r\n # Load all raw frames and list the original filename, along with the type \r\n # and archive names\r\n\r\n raw_files = []\r\n raw_archive_names = []\r\n raw_dpr_types = []\r\n \r\n files_sphere_raw = sorted(path_raw.glob('SPHERE*IRDIS*OBS*fits'))\r\n for file in files_sphere_raw:\r\n try:\r\n h=fits.getheader(file)\r\n if 'ALC' in h['HIERARCH ESO INS COMB ICOR'] and 'IRDIS' in h['HIERARCH ESO SEQ ARM']: \r\n raw_files.append(files_sphere_raw)\r\n raw_archive_names.append(h['ARCFILE'])\r\n raw_dpr_types.append(h['HIERACH ESO DPR TYPE'])\r\n except:\r\n continue\r\n \r\n\r\n #%% Step 1 : \r\n \r\n # You look for all reduced frames that correspond to \r\n # coronagraphic data with PRO.CATG as \r\n # - IRD_SCIENCE_DBI_LEFT_CUBE \r\n # - IRD_SCIENCE_DBI_RIGHT_CUBE\r\n # - IRD_STAR_CENTER\r\n \r\n files_sphere_reduced = sorted(path_reduced.glob('r.SPHER*.fits'))\r\n reduced_files_dbi_left = [] \r\n raw_files_dbi_left = [] # careful !! 
This is not the raw file name as saved in raw/date/\r\n \r\n reduced_files_dbi_right = []\r\n raw_files_dbi_right = []\r\n \r\n for file in files_sphere_reduced:\r\n try:\r\n h=fits.getheader(file)\r\n # check whether this is a coronagraphic frame\r\n if 'ALC' in h['HIERARCH ESO INS COMB ICOR']:\r\n if h['HIERARCH ESO PRO CATG'] == 'IRD_SCIENCE_DBI_LEFT_CUBE':\r\n reduced_files_dbi_left.append(file)\r\n raw_files_dbi_left.append(h['HIERARCH ESO PRO REC1 RAW1 NAME'])\r\n if h['HIERARCH ESO PRO CATG'] == 'IRD_SCIENCE_DBI_RIGHT_CUBE':\r\n reduced_files_dbi_right.append(file)\r\n raw_files_dbi_right.append(h['HIERARCH ESO PRO REC1 RAW1 NAME'])\r\n except:\r\n continue\r\n\r\n \r\n \r\n #%% Step 2: \r\n # You isolate for each raw frames one single reduced left and right cube \r\n # (currently the pipeline does multiple reductions of the same raw cube for \r\n # an unknown reason, we need to get rid of the duplicates here).\r\n \r\n unique_reduced_files_dbi_left,indices = np.unique(reduced_files_dbi_left,return_index=True)\r\n unique_raw_files_dbi_left = [raw_files_dbi_left[i] for i in indices]\r\n \r\n unique_reduced_files_dbi_right,indices = np.unique(reduced_files_dbi_right,return_index=True)\r\n unique_raw_files_dbi_right = [raw_files_dbi_right[i] for i in indices]\r\n \r\n \r\n #%% Step 3\r\n # You extract DIT, NDIT, NAXIS3, coronagraph name:\r\n # Lyot stop: HIERARCH ESO INS1 OPTI1 NAME\r\n # Lyot mask: HIERARCH ESO INS4 OPTI11 NAME\r\n # coro combination name: HIERARCH ESO INS COMB ICOR\r\n # along with the ND filter and the IRDIS filter from each RAW frame.\r\n # Also extract all info that will be useful later for the analysis:\r\n # star name and magnitude, airmass, seeing, coherence time, telescope Seeing...)\r\n # This is important to do that on each raw frame on not on each reduced frame\r\n # as the keywords are changed by the pipeline and unreliable.\r\n \r\n \r\n \r\n #%% Step 4\r\n # Assoiate each FLUX with a CORONAGRAPHIC image\r\n \r\n \r\n \r\n #%% Step 5\r\n # Work on the Flux frame first:\r\n # - Detect the central star\r\n # - measure the FWHM, validate that this is the star by checking the FWHM\r\n # (should be the diffraction limit)\r\n # - aperture photometry on the star using a diameter of 1 lambda/D \r\n # - divide the flux by the DIT and correct by the ND transmission: this\r\n # gives you the star reference flux\r\n \r\n \r\n #%% Step 6\r\n # Work on the coronographic frame now:\r\n # - Detect the coronagraphic center.\r\n # - compute the contrast (using the standard deviation of many apertures \r\n # place at a given separation, using for instance the python module VIP)\r\n\r\n\r\n #%% Step 7:\r\n # Divide the azinuthal standard deviations by the star reference flux to obtain\r\n # the contrast as a function of radius. 
\r\n # Plot the result and save in a csv file along with all relevant parameters \r\n # for the analysis (coronagraph name, star magnitude, airmass, seeing, coherence time)\r", "def initialize_data(self , station = '', datasets = {} ): \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n self.out_name = self.out_dir + '/' + self.station + '_CEUAS_premerged_v0.nc'\n\n self.observations_table_vars = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units', 'source_id']\n\n \"\"\" Loading the econding of the tables created from the harvester script and to be applied again \"\"\"\n self.encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n self.encodings['era5fb'] = np.load('era5fb_encodings_all.npy' , allow_pickle = True ).item() \n self.dic_type_attributes = np.load('dic_type_attributes.npy',allow_pickle= True).item()\n \n self.era5fb_columns = self.dic_type_attributes['era5fb'].keys()\n\n self.obstab_nans_filled = False \n\n data['cdm_tables'] = {} \n \n \"\"\" Loop over all the datasets \n k: name of the dataset \n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ] \"\"\" \n for k,v in self.datasets.items() :\n data[k] = {}\n for F in v:\n \n logging.info(' Dataset ::: *** %s %s ' , k , F ) \n \n data[k][F] = {}\n\n h5py_file = h5py.File(F, 'r')\n data[k][F]['h5py_file'] = h5py_file \n \n a = h5py_file['recordtimestamp']\n \n data[k][F]['recordtimestamp'] = a\n data[k][F]['recordindex'] = h5py_file['recordindex']\n data[k][F]['dateindex'] = h5py_file['dateindex']\n a = h5py_file['recordtimestamp']\n data[k][F]['max_date'] = max(a)\n data[k][F]['min_date'] = min(a)\n \n data[k][F]['counter'] = 0\n\n #######\n # HEADER TABLE\n #######\n head_tab = h5py_file['header_table']\n logging.info('*** header_table')\n data[k][F]['header_table'] = {}\n for var in head_tab.keys():\n if ('string' in var or 'hdrlen' in var): continue\n try: \n data[k][F]['header_table'][var] = (np.array(head_tab[var][:])).astype(self.dic_type_attributes['header_table'][var]['type'] )\n except:\n print('failed convertion type header' , k , ' ' , F , ' ' , var )\n \n ####### \n # STATION CONFIGURATION\n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'station_configuration' , decode_times = False )\n data[k][F]['station_configuration'] = d.to_dataframe()\n logging.debug('Done with %s station_configuration' , str(k) )\n d.close()\n\n ####### \n # SOURCE CONFIGURATION \n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'source_configuration' , decode_times = False )\n data[k][F]['source_configuration'] = d\n logging.debug('Done with %s source_configuration' , str(k) )\n d.close()\n\n\n data['cdm_tables'] = {}\n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\"\n for t in self.standard_cdm: # [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n if t not in data['cdm_tables'].keys():\n #data['cdm_tables'][t] = ''\n cdm = xr.open_dataset(F , engine = 'h5netcdf' , group = t )\n data['cdm_tables'][t] = cdm \n\n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n\n self.data = data\n\n \"\"\" Making all date_times \"\"\"\n self.make_all_datetime()", "def Read_Spectrum(Path,borne1 = 0,borne2 = 0) :\n x,y=[],[]\n fs = open(Path, 'r')\n 
#print('Open new fic') \n#index_array = 0\n while 1: \n txt = fs.readline()\n #print(txt)\n if ((txt =='')|(txt == '\\r\\n')): \n break\n #print(txt)\n ii=-1\n while 1: # on cherche le premier espace qui limite le premier nombre\n ii = ii+1\n #print(ii)\n if ((txt[ii] == ' ') |(txt[ii] == '\\t')):\n break\n \n x.append(float(txt[0:ii]))\n y.append(float(txt[ii:])) \n# if len(txt) == 21 : #nu >= 10000 cm-1\n# x.append(float(txt[0:11]))\n# y.append(float(txt[11:]))\n# elif len(txt) == 20 : #nu >= 1000 cm-1\n# x.append(float(txt[0:10]))\n# y.append(float(txt[10:]))\n# elif len(txt) == 19 : #nu >= 100 cm-1\n# x.append(float(txt[0:9]))\n# y.append(float(txt[9:]))\n# elif len(txt) == 18 : #nu >= 10 cm-1\n# x.append(float(txt[0:8]))\n# y.append(float(txt[8:]))\n# elif len(txt) == 17 : #nu >= 1 cm-1\n# x.append(float(txt[0:7]))\n# y.append(float(txt[7:]))\n\n #x[index_array],y[index_array] = float(txt[0:9]),float(txt[10:17])\n #index_array = index_array+1\n \n fs.close()\n x = np.array(x)\n y = np.array(y)\n if ((borne1 == 0) & (borne2 == 0)) :\n pass \n else :\n index_ok = ((x<borne2) & (x>borne1))\n x = x[index_ok]\n y = y[index_ok]\n\n return x,y", "def analyzeViSDEMData(dict):\n \n if 'path_in' in dict:\n path_in = dict['path_in']\n else:\n print(\"Caution: No path for input folder containing the data has been defined. Please define path to folder by dict['path_in']=path_in\") \n return\n \n path_out_default = '../colordeficiency-data/' \n if 'path_out' in dict:\n path_out = dict['path_out']\n else:\n print(\"Caution: No path for output folder where the data should be stored has been defined. Using default output path instead: \"+str(path_out_default))\n path_out = path_out_default\n \n if 'round' in dict:\n round = dict['round']\n else:\n print(\"Error: You have to chose a round first.\")\n \n path = os.path.join(os.path.dirname(os.path.abspath(os.path.join(__file__,os.pardir))),'colordeficiency-data')\n \n # 0. Step: Get all the relevant information, i.e. motive_ids, obs_col_defs etc.\n if round == 1:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids.csv\")\n elif round == 2:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids_2.csv\")\n \n vs_ids_sheet = pandas.read_csv(visualsearch_ids,sep=\";\")\n \n # Get all the relevant information about the observers, i.e. obs_col_defs etc.\n observer_ids = os.path.join(path,\"observer_ids.csv\")\n obs_ids_sheet = pandas.read_csv(observer_ids,sep=\";\")\n \n # 1. Step: Read all the XLSX data in the path\n ext = 'xlsx'; xlsx_files = getAllXXXinPath(path_in,ext)\n dataArray = pandas.DataFrame()\n i=1\n for xlsx_file in xlsx_files:\n sys.stdout.write(xlsx_file)\n dataArray_tmp, testArray, extraDataDict = extractExperimentData(os.path.join(path_in,xlsx_file))\n \n newDataArray = dataArray_tmp[['dalt_id','coldef_type','resp.corr_raw','resp.rt_raw','stimFile']]\n \n if \"2. Session\" in extraDataDict:\n sessionID = int(extraDataDict['2. Session'])\n newDataArray['session_id'] = sessionID\n \n if 'group' in extraDataDict:\n obsGroup = str(extraDataDict['group'])\n newDataArray['obsGroup'] = obsGroup\n \n if '0. Participant ID' in extraDataDict:\n obsID = int(extraDataDict['0. Participant ID'])\n \n newDataArray['observer_id'] = obsID\n obs_coldef_type = obs_ids_sheet.loc[obs_ids_sheet['observer_id']==obsID,['observer_coldef_type']]\n newDataArray['observer_coldef_type'] = int(obs_coldef_type['observer_coldef_type'])\n \n dataArray = pandas.concat([dataArray, newDataArray])\n sys.stdout.write(' . 
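For a plain two-column ASCII spectrum like the one parsed character by character in the reader above, numpy can do the same job in a few lines. This is a compact alternative sketch, assuming whitespace- or tab-delimited columns with no header:

import numpy as np

def read_spectrum(path, borne1=0.0, borne2=0.0):
    # Column 1: wavenumber, column 2: intensity; any run of spaces/tabs works.
    x, y = np.loadtxt(path, unpack=True)
    if borne1 != 0.0 or borne2 != 0.0:
        keep = (x > borne1) & (x < borne2)
        x, y = x[keep], y[keep]
    return x, y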
')\n if (i%5)==0: sys.stdout.write('\\n')\n i+=1\n sys.stdout.write('\\n')\n \n # 2. Step: Adapt values to programstandards\n for item in settings.colDefLong2ID:\n dataArray.loc[dataArray['coldef_type'] == item, ['coldef_type']] = settings.colDefLong2ID[item]\n \n for item in settings.dalt2ID:\n dataArray.loc[dataArray['dalt_id'] == item, ['dalt_id']] = settings.dalt2ID[item]\n \n dataArray.loc[dataArray['dalt_id'] == 'none', ['dalt_id']] = 0\n \n \n dataArray = dataArray.rename(columns={'dalt_id': 'dalt_id',\n 'coldef_type': 'coldef_type',\n 'resp.corr_raw': 'is_correct',\n 'resp.rt_raw': 'resp_time',\n 'stimFile': 'filepath'})\n dataArray = dataArray.reset_index()\n \n # Adding set_id, motive_id and variant_id to each file\n for index, row in dataArray.iterrows():\n path_tmp = row['filepath']\n filename = os.path.basename(path_tmp).split('.')[0]\n dict_tmp = getStatsFromFilename(filename)\n imgID_tmp = int(dict_tmp['img_id'])\n \n tempVSDataArray = vs_ids_sheet.loc[vs_ids_sheet['image_id']==imgID_tmp,['set_id','motive_id','variant_id']]\n \n dataArray.at[index,'image_id'] = imgID_tmp\n dataArray.ix[index,'set_id'] = int(tempVSDataArray['set_id'])\n dataArray.ix[index,'motive_id'] = int(tempVSDataArray['motive_id'])\n dataArray.ix[index,'variant_id'] = int(tempVSDataArray['variant_id'])\n\n dataArray.image_id = dataArray.image_id.astype(int)\n dataArray.set_id = dataArray.set_id.astype(int)\n dataArray.motive_id = dataArray.motive_id.astype(int)\n dataArray.variant_id = dataArray.variant_id.astype(int)\n dataArray.is_correct = dataArray.is_correct.astype(bool)\n \n dataArray = dataArray[['image_id','set_id','motive_id','variant_id','dalt_id','coldef_type','is_correct','resp_time','observer_id','observer_coldef_type','session_id','filepath','obsGroup']]\n \n # 3. Saving data to file\n try:\n dataArray.to_csv(os.path.join(path_out, 'visdem-data.csv'),sep=\";\")\n sys.stdout.write(\"Success: ViSDEM data successfully saved in '\"+str(path_out)+\"'.\\n\")\n except Exception as e:\n print(e)", "def execute(self, in_nc, in_weight_table, out_nc, grid_name, conversion_flag, in_time_interval=\"6hr\"): # modified this line CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n\r\n # Validate the netcdf dataset\r\n vars_oi_index = self.dataValidation(in_nc)\r\n \r\n \"\"\"get conversion factor the flag is used to differentiate forecasts converted \r\n to netCDF from GRIB and the original netCDF. 
They both use the same weight tables\r\n but the original netCDF is in mm whereas the stock GRIB forecasts are in meters.\r\n Set the conversion_flag in the run.py configuration file.\r\n \"\"\"\r\n if conversion_flag: # Line Added CJB 20190218\r\n conversion_factor = 1.0 #Line Modified CJB 20190218\r\n elif grid_name == 'ecmwf_t1279' or grid_name == 'ecmwf_tco639': # Line Modified CJB 20190218\r\n #if grid_name == 'ecmwf_HRES_F' or grid_name == 'ecmwf_ENS_F': # Line Added/Modified CJB 20190108\r\n #new grids in mm instead of m\r\n conversion_factor = 0.001\r\n else: #set the conversion factor to 1 for everything else (data is in m but legacy installations do not have a flag) Line Added CJB 20190218\r\n conversion_factor = 1.0 # Line Added CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n # identify if the input netcdf data is the High Resolution data with three different time intervals\r\n id_data = self.dataIdentify(in_nc)\r\n if id_data is None:\r\n raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the netcdf dataset'''\r\n data_in_nc = NET.Dataset(in_nc)\r\n time = data_in_nc.variables['time'][:]\r\n\r\n # Check the size of time variable in the netcdf data\r\n if len(time) == 0: # *** MJS This change seems like it is too loose an error trap; should it account for instances when nc file time var is != in length with id_data lenght?\r\n raise Exception(self.errorMessages[3])\r\n #if len(time) != self.length_time[id_data]:\r\n # raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the weight table '''\r\n print(\"Reading the weight table...\", in_weight_table)\r\n dict_list = {self.header_wt[0]:[], self.header_wt[1]:[], self.header_wt[2]:[],\r\n self.header_wt[3]:[], self.header_wt[4]:[]}\r\n\r\n with open(in_weight_table, \"r\") as csvfile:\r\n reader = csv.reader(csvfile)\r\n count = 0\r\n for row in reader:\r\n if count == 0:\r\n #check number of columns in the weight table\r\n if len(row) < len(self.header_wt):\r\n raise Exception(self.errorMessages[4])\r\n #check header\r\n if row[1:len(self.header_wt)] != self.header_wt[1:]:\r\n raise Exception(self.errorMessages[5])\r\n count += 1\r\n else:\r\n for i in range(len(self.header_wt)):\r\n dict_list[self.header_wt[i]].append(row[i])\r\n count += 1\r\n\r\n ''' Calculate water inflows\r\n as a reminder, the first 91 time steps are T=0 to T=90 and are 1-hourly for HRES\r\n\t\t the next 18 time steps for HRES are T=93 to T=144 at 3-hourly\r\n then the final 16 time steps are T=150 to T=240 at 6-hourly for a total of 125 records\r\n\t\t\tFor ENS, the first 49 time steps are T=0 to T=144 at 3-hourly\r\n\t\t\tthe final 35 time steps are T=150 to T=360 at 6-hourly for a total of 84 records\r\n '''\r\n\t\t\t\r\n print(\"Calculating water inflows...\")\r\n\t\t\r\n ''' \r\n added the next section CJB 20180122 \r\n '''\r\n\r\n\t\t# Get the overall number of time steps\r\n size_time = self.getTimeSize(in_nc) #CJB 20180122\r\n # Determine the size of time steps in each group (1-hourly, 3-hourly, and/or 6-hourly)\r\n if id_data == \"HRES1\": # T <= 90 \r\n time_size = (size_time - 1)\r\n elif id_data == \"HRES13\": # 93 <= T <= 144\r\n if in_time_interval == \"1hr\":\r\n time_size = self.length_time_opt[\"HighRes-1hr\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - 1)\r\n elif id_data == \"HRES136\": # 150 <= T <= 240\r\n if in_time_interval == \"1hr\":\r\n time_size = 
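The 1/3/6-hourly layout quoted in the comment above fixes the expected record counts, and it is cheap to sanity-check them before doing any slicing. The arithmetic below only restates the boundaries given in that comment for the HRES product:

# HRES: 1-hourly T=0..90, then 3-hourly T=93..144, then 6-hourly T=150..240
n_1h = (90 - 0) // 1 + 1       # 91 records
n_3h = (144 - 93) // 3 + 1     # 18 records
n_6h = (240 - 150) // 6 + 1    # 16 records
assert n_1h + n_3h + n_6h == 125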
self.length_time_opt[\"HighRes-1hr\"]\r\n elif in_time_interval == \"3hr\": # MJS Doesn't seem to be a case used currently, but added just in case later need.\r\n time_size = self.length_time_opt[\"HighRes-3hr-sub\"] # MJS This is HRES136, i.e., if for some reason in ecmwf_rapid_multi a 3 hr is asked for for this case, it should still have the 3hr_sub number of times\r\n elif in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - self.length_time_opt[\"HighRes-3hr-Sub\"] - 1)\r\n elif id_data == \"ENS3\": # T <= 144\r\n time_size = (size_time - 1)\r\n elif id_data == \"ENS36\": # 150 <= T <= 360\r\n if in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"LowResFull-3hr-Sub\"] - 1)\r\n else: # id_data == \"ENS6\": # T <= 360 but all 6-hourly\r\n time_size = (size_time - 1)\r\n #else: # something is wrong and need to throw an error message - likely a corrupt forecast file\r\n # raise Exception(self.errorMessages[3])\r\n #''' end of added section CJB 20180122 \r\n #'''\r\n\r\n #if id_data == \"LowRes\":\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #elif id_data == \"Low3HrRes\":\r\n # size_time = self.length_time_opt[\"LowRes-3hr\"]\r\n #elif id_data == \"LowResFull\":\r\n # if in_time_interval == \"3hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #else: #HighRes\r\n # if in_time_interval == \"1hr\":\r\n # size_time = self.length_time_opt[\"HighRes-1hr\"]\r\n # elif in_time_interval == \"3hr\":\r\n # size_time = self.length_time_opt[\"HighRes-3hr\"]\r\n # elif in_time_interval == \"3hr_subset\":\r\n # size_time = self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"HighRes-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"HighRes-6hr\"]\r\n\r\n size_streamID = len(set(dict_list[self.header_wt[0]]))\r\n\r\n # Create output inflow netcdf data\r\n # data_out_nc = NET.Dataset(out_nc, \"w\") # by default format = \"NETCDF4\"\r\n data_out_nc = NET.Dataset(out_nc, \"w\", format = \"NETCDF3_CLASSIC\")\r\n #dim_Time = data_out_nc.createDimension('Time', size_time)\r\n dim_Time = data_out_nc.createDimension('Time', time_size)\r\n dim_RiverID = data_out_nc.createDimension('rivid', size_streamID)\r\n var_m3_riv = data_out_nc.createVariable('m3_riv', 'f4', \r\n ('Time', 'rivid'),\r\n fill_value=0)\r\n \r\n #data_temp = NUM.empty(shape = [size_time, size_streamID])\r\n data_temp = NUM.empty(shape = [time_size, size_streamID])\r\n\r\n lon_ind_all = [int(i) for i in dict_list[self.header_wt[2]]]\r\n lat_ind_all = [int(j) for j in dict_list[self.header_wt[3]]]\r\n\r\n # Obtain a subset of runoff data based on the indices in the weight table\r\n min_lon_ind_all = min(lon_ind_all)\r\n max_lon_ind_all = max(lon_ind_all)\r\n min_lat_ind_all = min(lat_ind_all)\r\n max_lat_ind_all = max(lat_ind_all)\r\n\r\n # self.vars_oi[vars_oi_index][3] = RO; get that variable's 3D structure (time, lat_index, lon_index) ready to reshape into 2D (time, lat_index x lon_index)\r\n data_subset_all = data_in_nc.variables[self.vars_oi[vars_oi_index][3]][:, min_lat_ind_all:max_lat_ind_all+1, 
min_lon_ind_all:max_lon_ind_all+1]\r\n len_time_subset_all = data_subset_all.shape[0]\r\n len_lat_subset_all = data_subset_all.shape[1]\r\n len_lon_subset_all = data_subset_all.shape[2]\r\n data_subset_all = data_subset_all.reshape(len_time_subset_all, (len_lat_subset_all * len_lon_subset_all))\r\n\r\n # compute new indices based on the data_subset_all\r\n index_new = []\r\n for r in range(0,count-1):\r\n ind_lat_orig = lat_ind_all[r]\r\n ind_lon_orig = lon_ind_all[r]\r\n index_new.append((ind_lat_orig - min_lat_ind_all)*len_lon_subset_all + (ind_lon_orig - min_lon_ind_all))\r\n\r\n # obtain a new subset of data\r\n data_subset_new = data_subset_all[:,index_new]*conversion_factor\r\n\r\n # start compute inflow\r\n pointer = 0\r\n for s in range(0, size_streamID):\r\n npoints = int(dict_list[self.header_wt[4]][pointer])\r\n # Check if all npoints points correspond to the same streamID\r\n if len(set(dict_list[self.header_wt[0]][pointer : (pointer + npoints)])) != 1:\r\n print(\"ROW INDEX {0}\".format(pointer))\r\n print(\"RIVID {0}\".format(dict_list[self.header_wt[0]][pointer]))\r\n raise Exception(self.errorMessages[2])\r\n\r\n area_sqm_npoints = [float(k) for k in dict_list[self.header_wt[1]][pointer : (pointer + npoints)]]\r\n area_sqm_npoints = NUM.array(area_sqm_npoints)\r\n area_sqm_npoints = area_sqm_npoints.reshape(1, npoints)\r\n data_goal = data_subset_new[:, pointer:(pointer + npoints)]\r\n \r\n \r\n #remove noise from data\r\n data_goal[data_goal<=0.00001] = 0\r\n\r\n ''' IMPORTANT NOTE: runoff variable in ECMWF dataset is cumulative instead of incremental through time\r\n '''\r\n # For data with Low Resolution, there's only one time interval 6 hrs\r\n if id_data == \"ENS6\": # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints\r\n elif id_data == \"ENS3\": # there's only one time interval 3 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\r\n elif id_data == \"HRES1\": # there's only one time interval 1 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\t\r\n #For data with the full version of Low Resolution, from Hour 0 to 144 (the first 49 time points) are of 3 hr time interval,\r\n # then from Hour 144 to 360 (36 time points) are of 6 hour time interval\r\n elif id_data == \"ENS36\": # Line Added/Modified CJB 20190108\r\n if in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n ro_stream = NUM.subtract(data_goal[1:49,], data_goal[:48,]) * area_sqm_npoints\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[49:,], data_goal[48:-1,]) * area_sqm_npoints\r\n else: #\"LowRes-6hr\"\r\n ######################################################\r\n # MJS Always assume this case will have a full ECMWF 240\r\n # hour forecast to work with. 
It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n #convert all to 6hr\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[2:49:2,], data_goal[:48:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[49:,], data_goal[48:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b]) * area_sqm_npoints\r\n #For data with High Resolution, from Hour 0 to 90 (the first 91 time points) are of 1 hr time interval,\r\n # then from Hour 90 to 144 (18 time points) are of 3 hour time interval, and from Hour 144 to 240 (16 time points)\r\n # are of 6 hour time interval\r\n ##########################################################\r\n # MJS The following should handle id_data = HRES13 and HRES136\r\n ##########################################################\r\n else:\r\n if in_time_interval == \"1hr\":\r\n #ro_stream = NUM.subtract(data_goal[1:91,],data_goal[:90,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:1+time_size,],data_goal[:time_size,]) * area_sqm_npoints # Line Added/Modified CJB, MJS 20190108\r\n elif in_time_interval == \"3hr\": # MJS HRES 3hr not currently used\r\n # calculate time series of 3 hr data from 1 hr data\r\n ro_3hr_a = NUM.subtract(data_goal[3:91:3,],data_goal[:88:3,])\r\n # get the time series of 3 hr data\r\n #ro_3hr_b = NUM.subtract(data_goal[91:109,], data_goal[90:108,])\r\n ro_3hr_b = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) # MJS modified again; seems no case for this, but just in case later... Line Added/Modified CJB 20190108\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_3hr_a, ro_3hr_b]) * area_sqm_npoints\r\n elif in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n #ro_stream = NUM.subtract(data_goal[91:109,], data_goal[90:108,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) * area_sqm_npoints # MJS modified again; needs to handle HRES13 that might not have complete 3hr set... Line Added/Modified CJB 20190108\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[109:,], data_goal[108:-1,]) * area_sqm_npoints\r\n ######################################################\r\n # MJS Always assume this case will have a full ECMWF 240 \r\n # hour forecast to work with. 
It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n else: # in_time_interval == \"6hr\"\r\n #arcpy.AddMessage(\"6hr\")\r\n # calculate time series of 6 hr data from 1 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[6:91:6,], data_goal[:85:6,])\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[92:109:2,], data_goal[90:107:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_c = NUM.subtract(data_goal[109:,], data_goal[108:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b, ro_6hr_c]) * area_sqm_npoints\r\n \r\n #remove negative values\r\n ro_stream[ro_stream<0] = 0\r\n data_temp[:,s] = ro_stream.sum(axis = 1)\r\n\r\n pointer += npoints\r\n\r\n\r\n '''Write inflow data'''\r\n print(\"Writing inflow data...\")\r\n var_m3_riv[:] = data_temp\r\n # close the input and output netcdf datasets\r\n data_in_nc.close()\r\n data_out_nc.close()", "def openMCSH5File(filename, verbose=False):\n rf = h5py.File(filename, 'r')\n \n stream = rf.require_group('/Data/Recording_0/AnalogStream/Stream_0')\n data = np.array(stream.get('ChannelData'),dtype=np.int)\n timestamps = np.array(stream.get('ChannelDataTimeStamps'))\n info = np.array(stream.get('InfoChannel'))\n \n Unit = info['Unit'][0]\n Tick = info['Tick'][0]/1e6\n exponent = info['Exponent'][0]\n convFact = info['ConversionFactor'][0]\n \n nRecCh, nFrames = data.shape\n channel_ids = info['ChannelID']\n assert len(np.unique(channel_ids)) == len(channel_ids), 'Duplicate MCS channel IDs found'\n electrodeLabels = info['Label']\n \n TimeVals = np.arange(timestamps[0][0],timestamps[0][2]+1,1)*Tick\n \n assert Unit==b'V', 'Unexpected units found, expected volts, found {}'.format(Unit.decode('UTF-8'))\n data_V = data*convFact.astype(float)*(10.0**(exponent))\n \n timestep_avg = np.mean(TimeVals[1:]-TimeVals[0:-1])\n timestep_std = np.std(TimeVals[1:]-TimeVals[0:-1])\n timestep_min = np.min(TimeVals[1:]-TimeVals[0:-1])\n timestep_max = np.min(TimeVals[1:]-TimeVals[0:-1])\n assert all(np.abs(np.array((timestep_min, timestep_max))-timestep_avg)/timestep_avg < 1e-6), 'Time steps vary by more than 1 ppm'\n samplingRate = 1./timestep_avg\n\n if verbose:\n print('# MCS H5 data format')\n print('#')\n print('# File: {}'.format(rf.filename))\n print('# File size: {:.2f} MB'.format(rf.id.get_filesize()/1024**2))\n print('#')\n for key in rf.attrs.keys():\n print('# {}: {}'.format(key,rf.attrs[key]))\n print('#')\n print('# Signal range: {:.2f} to {:.2f} µV'.format(np.amin(data_V)*1e6,np.amax(data_V)*1e6))\n print('# Number of channels: {}'.format(nRecCh))\n print('# Number of frames: {}'.format(nFrames))\n print('# Time step: {:.2f} µs ± {:.5f} % (range {} to {})'.format(timestep_avg*1e6, timestep_std/timestep_avg*100, timestep_min*1e6, timestep_max*1e6))\n print('# Sampling rate: {:.2f} Hz'.format(samplingRate))\n print('#')\n print('# MCSH5RecordingExtractor currently only reads /Data/Recording_0/AnalogStream/Stream_0')\n\n return (rf, nFrames, samplingRate, nRecCh, channel_ids, electrodeLabels, exponent, convFact)", "def test_simple_recov(self):\r\n\r\n # ND072022.PD0 contains a single ADCPA ensemble\r\n with open(os.path.join(RESOURCE_PATH, 'ND072022.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n particles = parser.get_records(1)\r\n \r\n log.debug('got back %d particles', len(particles))\r\n \r\n 
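Two operations carry most of the logic in the inflow computation above: differencing the cumulative ECMWF runoff into per-step increments, and pairing 3-hourly increments into 6-hourly ones. A stripped-down numpy sketch of just those two steps, with invented argument names:

import numpy as np

def cumulative_to_incremental(runoff_cum):
    # ECMWF runoff is accumulated from forecast start, so each interval's
    # contribution is the difference between consecutive records.
    ro = np.diff(runoff_cum, axis=0)
    ro[ro < 0] = 0          # clip small negative values left by noise
    return ro

def pair_3h_into_6h(ro_3h):
    # Sum consecutive pairs of 3-hourly increments into 6-hourly totals;
    # assumes an even number of 3-hourly records.
    return ro_3h[0::2] + ro_3h[1::2]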
self.assert_particles(particles, 'ND072022_recov.yml', RESOURCE_PATH)", "def ncwrt_retrieval_obs_s2(retr_setup, outname=None):\n\n #-- set name of file to be generated\n act_outname = outname if outname!=None else 'obs_s2.nc'\n msg = \"Start writing configuration file ***{}***...\".format(act_outname)\n FileLogger.info(msg)\n\n #-- compression settings\n zlev = retr_setup.zlev\n use_zlib = retr_setup.use_zlib\n\n #-- retrieval settings\n s2_table = retr_setup.obs_dct['S2']\n timepts = s2_table.geom.date_utc\n npts = len(timepts)\n s2_satid = np.array(s2_table.sat_id_lst, dtype=str)\n s2_data = s2_table.data\n s2_dataunc = s2_table.dataunc\n nt,nbands = s2_data.shape\n\n #-- temporal settings, create time-values (time-since)\n time_start, time_end = datelst_get_month_aligned_bounds(timepts)\n time_coverage_start = time_start.strftime('%Y-%m-%dT%H:%M:%S')\n time_coverage_end = time_end.strftime('%Y-%m-%dT%H:%M:%S')\n ref_time = dt.datetime(timepts[0].year,1,1) #January1st in year of first point in time\n time_unit = 'seconds since {}'.format(ref_time.strftime('%Y-%m-%dT%H:%M:%S'))\n time_values = nc4.date2num(timepts, time_unit)\n\n #-- ensure directory exists\n mkdirp_smart(os.path.dirname(act_outname))\n\n #-- open file pointer\n ncfp = nc4.Dataset(act_outname, 'w')\n #-- add dimensions\n d1 = ncfp.createDimension('npoints',npts)\n d2 = ncfp.createDimension('nbands',nbands)\n\n #-- time-value\n ncvar = ncfp.createVariable( 'time', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('standard_name','time')\n ncvar.setncattr('long_name','time')\n ncvar.setncattr('units', time_unit)\n ncvar[:] = time_values[:]\n\n #-- unit (in correct type)\n unit_one = np.array([1]).astype(s2_data.dtype)[0]\n\n # BRF\n ncvar = ncfp.createVariable( 'brf', np.float64, ('npoints','nbands'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'BRF top-of-canopy reflectances')\n ncvar.setncattr('units',unit_one)\n ncvar.setncattr('missing_value', retr_setup.obs_fill_value)\n ncvar[:,:] = s2_data[:,:]\n\n # BRF uncertainty\n ncvar = ncfp.createVariable( 'brf_unc', np.float64, ('npoints','nbands'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'BRF top-of-canopy reflectances')\n ncvar.setncattr('units',unit_one)\n ncvar.setncattr('missing_value', retr_setup.obs_fill_value)\n comment = \"BRF uncertainties are derived as {:.2f}[%] relative uncertainty \".format(\n 100.*retr_setup.s2_relunc)\n comment += \"and an uncertainty floor value of {:.4f} is applied.\".format(retr_setup.s2_uncfloor)\n ncvar.setncattr('comment', comment)\n ncvar[:,:] = s2_dataunc[:,:]\n\n # satellite identifier\n ncvar = ncfp.createVariable( 'satellite_id', str, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'satellite identifer')\n ncvar[:] = s2_satid[:]\n\n #-- global attributes\n ncfp.setncattr('creator_name',\"The Inversion Lab, Hamburg, Germany\")\n ncfp.setncattr('creator_email', \"Michael.Vossbeck(at)Inversion-Lab.com\")\n ncfp.setncattr('netcdf_libversion',\"{}\".format(nc4.__netcdf4libversion__))\n ncfp.setncattr('date_created',\"{}\".format(dt.datetime.utcnow().isoformat()))\n ncfp.setncattr('time_coverage_start',time_coverage_start)\n ncfp.setncattr('time_coverage_end',time_coverage_end)\n\n #-- close file pointer\n ncfp.close()\n\n # logging\n msg = \"...writing ***{}*** DONE\".format(act_outname)\n FileLogger.info(msg)", "def convert_input(filename, hdf5_data):\n x1 = []\n with open(filename, 'r') as inp:\n for line in inp:\n x1.append(line)\n 
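The time handling in the netCDF writer above (a 'seconds since ...' unit string plus netCDF4.date2num) is the detail most worth isolating. A minimal standalone version, with an invented output file name and a made-up set of timestamps:

import datetime as dt
import netCDF4 as nc4

dates = [dt.datetime(2020, 1, 1) + dt.timedelta(hours=6 * i) for i in range(4)]

ncfp = nc4.Dataset("time_demo.nc", "w")
ncfp.createDimension("npoints", len(dates))
time_var = ncfp.createVariable("time", "f8", ("npoints",))
time_var.units = "seconds since {}".format(dates[0].strftime("%Y-%m-%dT%H:%M:%S"))
time_var[:] = nc4.date2num(dates, time_var.units)
ncfp.close()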
idx = 1\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_TYPE, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(x1[idx].split()[0]))\n set_hdf5_attributes(dset, structure.H5_SOLVER_TYPE_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_RESTART, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(x1[idx].split()[0]))\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_RESTART_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_STOPPING, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_STOPPING_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_MAX_ITERATIONS, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(x1[idx].split()[0]))\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_MAX_ITERATIONS_ATTR)", "def test_solinst_colon_decimalsep(test_files_dir, testfile):\n solinst_file = hsr.SolinstFileReader(osp.join(test_files_dir, testfile))\n\n records = solinst_file.records\n assert len(records) == 10\n\n assert records.index.tolist()[0] == Timestamp('2016-11-23 19:00:00')\n assert records.iloc[0].iloc[0] == 1813.03\n assert records.iloc[0].iloc[1] == 9.182\n\n assert records.index.tolist()[-1] == Timestamp('2016-11-23 21:15:00')\n assert records.iloc[-1].iloc[0] == 1812.59\n assert records.iloc[-1].iloc[1] == 9.179\n\n assert list(records.columns) == [\"LEVEL_cm\", \"TEMPERATURE_°C\"]\n\n sites = solinst_file.sites\n assert sites.instrument_serial_number == \"2048469\"\n assert sites.project_name == \"03040008\"\n assert sites.site_name == \"Rougemont_Plus profond\"", "def test_extract_sensitivity_cube(tmpdir, datadir, datevshot):\n \n h5fn = datadir.join(\"test_hdf.h5\").strpath\n outfn = tmpdir.join(\"test.fits\").strpath \n \n if datevshot:\n args = [h5fn, \"--datevshot\", datevshot, \"063\", outfn]\n else:\n args = [h5fn, \"063\", outfn] \n\n extract_sensitivity_cube(args=args)\n \n assert isfile(outfn)", "def main(planckfile, dustfile, tomofile, colnames, names, pol, res,\\\n part='all', distcut=None):\n if (pol == 'P') or (pol == 'Q') or (pol == 'U'):\n polarisation = True\n elif (pol == 'p') or (pol == 'q') or (pol == 'u') or (pol == 'qu'):\n polarisation = True\n else:\n polarisation = False\n\n print(pol, polarisation)\n\n if distcut is None:\n distcut = 900\n\n if (polarisation is True):\n # read smoothed planck maps.\n print('load planck 353GHz data')\n # read_smooth_maps(filename, name, shape)\n IQU_smaps = smooth.read_smooth_maps(planckfile, names[0], 3)\n dust_smap = smooth.read_smooth_maps(dustfile, names[1], 1)[0]\n T_smap = IQU_smaps[0]\n Q_smap = IQU_smaps[1]\n U_smap = IQU_smaps[2]\n \n Nside = hp.get_nside(T_smap)\n print('Using Nside={}'.format(Nside))\n print(planckfile)\n band = planckfile.split('_')[2]\n if len(band) > 3:\n band = band[:3]\n if band == '15a':\n band = '353'\n print(band)\n\n if int(band) < 353:\n # load cmb intensity and subtract form polarization maps\n cmbfile = 'Data/IQU_Nside{}_CMB_10arcmin.h5'.format(Nside)\n cmbmaps = tools.Read_H5(cmbfile, 'IQU')*1e6\n Q_cmb = cmbmaps[1,:]\n U_cmb = cmbmaps[1,:]\n Q_smap = Q_smap - Q_cmb\n U_smap = U_smap - U_cmb\n \n print(np.mean(Q_smap), np.mean(U_smap))\n #sys.exit()\n # load tomography data:\n data = load.load_tomographydata(tomofile, colnames)\n print('Data loaded, using Nside={}'.format(Nside))\n\n p_map, q_map, u_map, sigma, r_map, pix =\\\n load.tomo_map(data, Nside, part=part, distcut=distcut)\n u_map = -u_map # to 
Healpix convention\n mask = np.unique(pix)\n print(len(mask))\n u_smap = smooth.smooth_tomo_map(u_map, mask, Nside, res)\n q_smap = smooth.smooth_tomo_map(q_map, mask, Nside, res)\n p_smap = smooth.smooth_tomo_map(p_map, mask, Nside, res)\n print('Tomography maps smoothed')\n print(np.mean(q_smap[mask]), np.mean(dust_smap[mask]), np.mean(Q_smap[mask]))\n dPsi = np.full(len(u_map), hp.UNSEEN)\n #sys.exit()\n\n l, b = tools.convert2galactic(data[:,0], data[:,1])\n theta, phi = hp.pix2ang(Nside, pix) \n lon = np.mean(phi)*180/np.pi\n lat = 90 - np.mean(theta)*180/np.pi\n print(lon, lat)\n\n x = 0.5*np.arctan2(U_smap[mask], Q_smap[mask])\n #x[x<0.] += np.pi\n #x[x>=np.pi] -= np.pi\n\n x_v = 0.5*np.arctan2(u_smap[mask], q_smap[mask])\n #psi_v[psi_v<0] += np.pi\n #psi_v[psi_v>=np.pi] -= np.pi \n print('Polarization angles of planck (mean, min, max) [deg]:')\n print(np.mean(x)*180/np.pi,np.min(x)*180/np.pi, np.max(x)*180/np.pi)\n print(np.mean(x_v)*180/np.pi,np.min(x_v)*180/np.pi,np.max(x_v)*180/np.pi)\n #print(np.mean(x+np.pi/2-psi_v))\n if (pol == 'P') or (pol == 'p'):\n print('-- P polarisation --')\n\n psi, psi_v, psi_s = tools.delta_psi(Q_smap[mask], q_smap[mask],\\\n U_smap[mask],u_smap[mask])\\\n #, plot=True, name='smooth2')\n\n dPsi[mask] = psi\n full_IQU = [T_smap, Q_smap, U_smap]\n tot_res, frac_res, dust = tools.map_analysis_function(p_smap, T_smap,\\\n dust_smap, mask, Nside)\n\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n elif (pol == 'Q') or (pol == 'q'):\n print('-- Q polarisation --')\n psi, psi_v, psi_s = tools.delta_psi(Q_smap[mask], q_smap[mask], U_smap[mask],\\\n u_smap[mask], plot=True)\n\n dPsi[mask] = psi\n full_IQU = [T_smap, Q_smap, U_smap]\n tot_res, frac_res, dust = tools.map_analysis_function(q_smap, Q_smap,\\\n dust_smap, mask, Nside)\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n elif (pol == 'U') or (pol == 'u'):\n print('-- U polarisation --')\n print(len(u_smap))\n psi, psi_v, psi_s = tools.delta_psi(Q_smap[mask], q_smap[mask],\\\n U_smap[mask],u_smap[mask], plot=True)\n\n dPsi[mask] = psi\n full_IQU = [T_smap, Q_smap, U_smap]\n tot_res, frac_res, dust = tools.map_analysis_function(u_smap, U_smap,\\\n dust_smap, mask, Nside)\n\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n elif (pol == 'QU') or (pol == 'qu'):\n print('-- Q,U polarisation --')\n print('Return: tomo, planck, dust, mask, dpsi, fullIQU, [lon,lat], r')\n psi, psi_v, psi_s = tools.delta_psi(Q_smap[mask], q_smap[mask],\\\n U_smap[mask], u_smap[mask])\n #, plot=True, name=Nside)\n\n dPsi[mask] = psi\n full_IQU = [T_smap, Q_smap, U_smap]\n tomo = [q_smap, u_smap, p_smap, sigma[1], sigma[2], sigma[0]]\n planck = [Q_smap, U_smap]\n coord = [lon, lat]\n angles = [dPsi[mask], psi_v, psi_s, sigma[3]]\n return(tomo, planck, dust_smap, coord, full_IQU, mask, r_map, angles)\n\n\n else:\n # use unsmoothe maps\n print('Use non smoothed maps')\n # load planck\n print('load planck 353GHz data')\n\n #T, P, Q, U = load.load_planck_map(planckfile, p=True)\n data = load.load_planck_map(planckfile, p=True)\n d353 = load.load_planck_map(dustfile)\n sys.exit()\n dust353 = tools.Krj2Kcmb(d353) * 1e6\n T = T*1e6\n P = P*1e6\n Q = Q*1e6\n U = U*1e6\n Nside = hp.get_nside(T_smap)\n\n data = load.load_tomographydata(tomofile, colnames)\n p_map, q_map, u_map, sigma, r_map, pix = load.tomo_map(data, Nside)\n u_map = -u_map # to Healpix convention\n mask = np.unique(pix)\n\n l, b = tools.convert2galactic(data[:,0], data[:,1])\n lon = 
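The Stokes algebra used on the maps above is compact enough to state on its own. A small helper with no Healpix dependence; the U sign convention (IAU versus Healpix) is left to the caller, as the code above flips u_map explicitly:

import numpy as np

def polarisation_from_stokes(I, Q, U):
    # Linearly polarised intensity, polarisation fraction and angle,
    # using the usual psi = 0.5*arctan2(U, Q) definition.
    P = np.hypot(Q, U)
    return P, P / I, 0.5 * np.arctan2(U, Q)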
np.mean(l)\n lat = np.mean(b)\n\n dPsi = np.full(len(u_map), hp.UNSEEN)\n\n if Ppol == True:\n print('-- P polarisation --')\n psi = tools.delta_psi(Q[mask], q_map[mask], U[mask],\\\n u_map[mask], plot=True)\n dPsi[mask] = psi\n full_IQU = [T, Q, U]\n tot_res, frac_res, dust = tools.map_analysis_function(p_map, T,\\\n dust353, mask)\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n elif Qpol == True:\n print('-- Q polarisation --')\n psi = tools.delta_psi(Q[mask], q_map[mask], U[mask],\\\n u_map[mask], plot=True)\n dPsi[mask] = psi\n full_IQU = [T, Q, U]\n tot_res, frac_res, dust = tools.map_analysis_function(q_map, Q,\\\n dust353, mask)\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n if Upol == True:\n print('-- U polarisation --')\n psi = tools.delta_psi(Q[mask], q_map[mask], U[mask],\\\n u_map[mask], plot=True)\n dPsi[mask] = psi\n full_IQU = [T, Q, U]\n tot_res, frac_res, dust = tools.map_analysis_function(u_map, U,\\\n dust353, mask)\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)", "def test():\n import os\n import ClearMap.ImageProcessing.SpotDetection as self\n reload(self)\n import ClearMap.IO as io \n import ClearMap.Settings as settings\n \n basedir = settings.ClearMapPath;\n #fn = '/home/ckirst/Science/Projects/BrainActivityMap/Data/iDISCO_2015_06/Adult cfos C row 20HF 150524.ims';\n fn = os.path.join(basedir, 'Test/Data/Synthetic/label_iDISCO_\\d{3}.tif');\n fn = os.path.join(basedir, 'Test/Data/OME/16-17-27_0_8X-s3-20HF_UltraII_C00_xyz-Table Z\\d{4}.ome.tif');\n #fn = '/run/media/ckirst/ChristophsBackuk4TB/iDISCO_2015_06/Adult cfos C row 20HF 150524.ims';\n #fn = '/home/nicolas/Windows/Nico/cfosRegistrations/Adult cfos C row 20HF 150524 - Copy.ims';\n #fn = '/home/ckirst/Science/Projects/BrainActivityMap/iDISCO_2015_04/test for spots added spot.ims'\n\n img = io.readData(fn);\n #img = dataset[0:500,0:500,1000:1008];\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[500:1500,500:1500,800:809]; \n img = img.astype('int16');\n \n #m = sys.modules['iDISCO.ImageProcessing.SpotDetection']\n #c = self.detectCells(img);\n \n c = self.detectCells(img, dogSize = None, cellShapeThreshold = 1, cellShapeFile = '/home/ckirst/Science/Projects/BrainActivityMap/Analysis/iDISCO/Test/Data/CellShape/cellshape_\\d{3}.tif');\n \n print ('done, found %d cells !' % c[0].shape[0])\n\n\n #test intensities:\n import numpy;\n x = numpy.random.rand(30,30,10);\n centers = numpy.array([[0,0,0], [29,29,9]]);\n i = self.findIntensity(x, centers, boxSize = (1,1,1));\n print (i)", "def _initialize_output(self, time_len, id_len):\r\n\r\n log('Initializing new file %s' % self.cf_compliant_file, 'INFO')\r\n \r\n self.cf_nc = Dataset(self.cf_compliant_file, 'w', format='NETCDF3_CLASSIC')\r\n \r\n # Create global attributes\r\n log(' globals', 'DEBUG', self.print_debug)\r\n self.cf_nc.featureType = 'timeSeries'\r\n self.cf_nc.Metadata_Conventions = 'Unidata Dataset Discovery v1.0'\r\n self.cf_nc.Conventions = 'CF-1.6'\r\n self.cf_nc.cdm_data_type = 'Station'\r\n self.cf_nc.nodc_template_version = (\r\n 'NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1')\r\n self.cf_nc.standard_name_vocabulary = ('NetCDF Climate and Forecast (CF) ' +\r\n 'Metadata Convention Standard Name ' +\r\n 'Table v28')\r\n self.cf_nc.title = 'RAPID Result'\r\n self.cf_nc.summary = (\"Results of RAPID river routing simulation. 
Each river \" +\r\n \"reach (i.e., feature) is represented by a point \" +\r\n \"feature at its midpoint, and is identified by the \" +\r\n \"reach's unique NHDPlus COMID identifier.\")\r\n self.cf_nc.time_coverage_resolution = 'point'\r\n self.cf_nc.geospatial_lat_min = 0.0\r\n self.cf_nc.geospatial_lat_max = 0.0\r\n self.cf_nc.geospatial_lat_units = 'degrees_north'\r\n self.cf_nc.geospatial_lat_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_lon_min = 0.0\r\n self.cf_nc.geospatial_lon_max = 0.0\r\n self.cf_nc.geospatial_lon_units = 'degrees_east'\r\n self.cf_nc.geospatial_lon_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_vertical_min = 0.0\r\n self.cf_nc.geospatial_vertical_max = 0.0\r\n self.cf_nc.geospatial_vertical_units = 'm'\r\n self.cf_nc.geospatial_vertical_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_vertical_positive = 'up'\r\n self.cf_nc.project = self.project_name\r\n self.cf_nc.processing_level = 'Raw simulation result'\r\n self.cf_nc.keywords_vocabulary = ('NASA/Global Change Master Directory ' +\r\n '(GCMD) Earth Science Keywords. Version ' +\r\n '8.0.0.0.0')\r\n self.cf_nc.keywords = 'DISCHARGE/FLOW'\r\n self.cf_nc.comment = 'Result time step(s) (seconds): ' + str(self.time_step_array)\r\n \r\n timestamp = datetime.utcnow().isoformat() + 'Z'\r\n self.cf_nc.date_created = timestamp\r\n self.cf_nc.history = (timestamp + '; added time, lat, lon, z, crs variables; ' +\r\n 'added metadata to conform to NODC_NetCDF_TimeSeries_' +\r\n 'Orthogonal_Template_v1.1')\r\n \r\n # Create dimensions\r\n log(' dimming', 'DEBUG', self.print_debug)\r\n self.cf_nc.createDimension('time', time_len)\r\n self.cf_nc.createDimension(self.output_id_dim_name, id_len)\r\n \r\n # Create variables\r\n log(' timeSeries_var', 'DEBUG', self.print_debug)\r\n timeSeries_var = self.cf_nc.createVariable(self.output_id_dim_name, 'i4', \r\n (self.output_id_dim_name,))\r\n timeSeries_var.long_name = (\r\n 'Unique NHDPlus COMID identifier for each river reach feature')\r\n timeSeries_var.cf_role = 'timeseries_id'\r\n \r\n log(' time_var', 'DEBUG', self.print_debug)\r\n time_var = self.cf_nc.createVariable('time', 'i4', ('time',))\r\n time_var.long_name = 'time'\r\n time_var.standard_name = 'time'\r\n time_var.units = 'seconds since 1970-01-01 00:00:00 0:00'\r\n time_var.axis = 'T'\r\n \r\n #only add if user adds\r\n if self.comid_lat_lon_z_file and os.path.exists(self.comid_lat_lon_z_file):\r\n log(' lat_var', 'DEBUG', self.print_debug)\r\n lat_var = self.cf_nc.createVariable('lat', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n lat_var.long_name = 'latitude'\r\n lat_var.standard_name = 'latitude'\r\n lat_var.units = 'degrees_north'\r\n lat_var.axis = 'Y'\r\n \r\n log(' lon_var', 'DEBUG', self.print_debug)\r\n lon_var = self.cf_nc.createVariable('lon', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n lon_var.long_name = 'longitude'\r\n lon_var.standard_name = 'longitude'\r\n lon_var.units = 'degrees_east'\r\n lon_var.axis = 'X'\r\n \r\n log(' z_var', 'DEBUG', self.print_debug)\r\n z_var = self.cf_nc.createVariable('z', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n z_var.long_name = ('Elevation referenced to the North American ' +\r\n 'Vertical Datum of 1988 (NAVD88)')\r\n z_var.standard_name = 'surface_altitude'\r\n z_var.units = 'm'\r\n z_var.axis = 'Z'\r\n z_var.positive = 'up'\r\n \r\n log(' crs_var', 'DEBUG', self.print_debug)\r\n crs_var = self.cf_nc.createVariable('crs', 'i4')\r\n 
crs_var.grid_mapping_name = 'latitude_longitude'\r\n crs_var.epsg_code = 'EPSG:4326' # WGS 84\r\n crs_var.semi_major_axis = 6378137.0\r\n crs_var.inverse_flattening = 298.257223563", "def test_ccds(self):\n #TODO write ccds tests", "def get_data(eh, file_list):\n x_pos = []\n y_pos = []\n x_vel = []\n y_vel = []\n z_vel = []\n unique_x = []\n unique_y = []\n\n # reading data\n for file in file_list:\n with open(file, 'r') as f:\n f.readline() # Ignores first line\n for line in f:\n line = line.strip()\n column = line.split()\n if len(column) == 4:\n if file == file_list[0]:\n # Only takes position data from first file as the same in each file\n x_pos.append(float(column[0]))\n y_pos.append(float(column[1]))\n\n x_vel.append(float(column[2]))\n y_vel.append(float(column[3]))\n z_vel.append(0.0)\n\n if float(column[0]) not in unique_x:\n unique_x.append(float(column[0]))\n if float(column[1]) not in unique_y:\n unique_y.append(float(column[1]))\n else:\n x_vel.append(float(column[2]))\n y_vel.append(float(column[3]))\n z_vel.append(0.0)\n else:\n print \"Error: TXT file is not correct!\"\n\n ux = len(unique_x)\n uy = len(unique_y)\n\n\n # xmid and ymid are used to get xz- and yz-planes. The median value is used. If the number of\n # unique xs and ys is even, then the median value will be one that does not correspond to a\n # measurement. When this is the case, the first value is ignored so that the number of uniques is\n # odd, resulting in a median value that corresponds to a measurement.\n if ux % 2 == 0:\n xmid = np.median(unique_x[1:])\n else:\n xmid = np.median(unique_x)\n\n if uy % 2 == 0:\n ymid = np.median(unique_y[1:])\n else:\n ymid = np.median(unique_y)\n\n if eh == exp_h_list[-1]:\n print \"All data read.\"\n\n\n # checks list lengths to ensure matching and then averages the velocities for all files\n # and then returns an array with position and average velocities\n if len(x_pos) == len(y_pos):\n pos_count = len(x_pos)\n if len(x_vel) == len(y_vel) and len(x_vel) == len(z_vel):\n vel_count = len(x_vel)\n nof = vel_count / pos_count # equals number of files for each height\n ax_vel, ay_vel, az_vel = avg_data_each_h(nof, pos_count, x_vel, y_vel, z_vel)\n\n if make_sg:\n subgrid_array = sub_grid(ux, x_pos, y_pos, eh, ax_vel, ay_vel, az_vel)\n return subgrid_array\n else:\n z_pos = [eh] * len(x_pos)\n return xmid, ymid, zip(x_pos, y_pos, z_pos, ax_vel, ay_vel, az_vel)\n else:\n print \"Error: different number of velocities!\"\n else:\n print \"Error: not all x-positions have a corresponding y-position!\"", "def test_calculate_r_i():\n \n with open(HsHis6_PEX5C_vs_HsPEX5C_calc_r_I, 'rb') as file: # load the list with expexcted data frames from .pkl file\n expected_list = pickle.load(file)\n \n test_object = fa.read_in_envision(data_csv=HsHis6_PEX5C_vs_HsPEX5C, platemap_csv=Hs_His6_PEX5C_vs_HsPEX5C_platemap, data_type='plate', size=384)\n test_object.background_correct()\n test_object.calculate_r_i(correct=True, plot_i=False, thr=50)\n \n pd.testing.assert_frame_equal(test_object.data_dict['repeat_1']['data']['i_raw'], expected_list[0], atol=1E-6)\n pd.testing.assert_frame_equal(test_object.data_dict['repeat_1']['data']['r_raw'], expected_list[1], atol=1E-6)\n pd.testing.assert_frame_equal(test_object.data_dict['repeat_1']['data']['i_corrected'], expected_list[2], atol=1E-6)\n pd.testing.assert_frame_equal(test_object.data_dict['repeat_1']['data']['r_corrected'], expected_list[3], atol=1E-6)\n pd.testing.assert_frame_equal(test_object.data_dict['repeat_1']['data']['i_percent'], 
expected_list[4], atol=1E-6)", "def run(self, dataset_dir, sensor_data, output_dir,\n pc_input=None, save_pc=None,\n gn_input=None, save_gn=None,\n software_cfg=None,\n tolPC=TOLPC, tol=TOL, tolA=TOLA, tolB=TOLB, tolU=TOLU,\n show=1,\n return_covariance=True):\n\n # Default to save residual data\n res = True\n\n ################################################################################################################\n # 1.\tRead Harmonisation Matchup Data\n ################################################################################################################\n\n print \"\\nOpening data...\"\n HData = open_matchup(dataset_dir)\n HData.setSensorData(sensor_data)\n print \"Complete\"\n\n print \"\\nData Info\"\n print \"==========\"\n print \"Reference Sensors - \", [str(sensor) for sensor in HData.idx['sensors']\n if sensor not in HData.idx[\"parameter_sensor\"]]\n print \"Harmonising Sensors - \", [str(sensor) for sensor in HData.idx['sensors']\n if sensor in HData.idx[\"parameter_sensor\"]]\n print \"Total Match-Ups - \", HData.idx['cNm'][-1]\n print \"Total Sensor State Data Values - \", HData.idx['idx'][-1]\n print \"Total Harmonisation Paramaters - \", len(HData.idx['parameter_sensor'])\n\n ################################################################################################################\n # 2.\tPerform harmonisation\n ################################################################################################################\n\n print \"\\nBeginning Harmonisation...\"\n Harmonisation = HarmonisationEIV()\n HarmonisationOutput = Harmonisation.run(HData,\n pc_input=pc_input, save_pc=save_pc,\n gn_input=gn_input, save_gn=save_gn,\n show=show)\n\n print \"Final Solution:\"\n print HarmonisationOutput.parameter\n print HarmonisationOutput.parameter_covariance_matrix\n\n ################################################################################################################\n # 3.\tWrite data to file\n ################################################################################################################\n\n print 'Writing data to file...'\n # Add metadata\n startDate = \"\"\n endDate = \"\"\n HarmonisationOutput.additional_attributes = {\"software\": software_cfg['software'],\n \"software_version\": software_cfg['version'],\n \"software_tag\": software_cfg['tag'],\n \"job_id\": software_cfg[\"job_id\"],\n \"matchup_dataset\": \"_\".join((software_cfg['matchup_dataset'],\n startDate, endDate))}\n\n # Write data\n fname_output = \"_\".join((\"harm\", software_cfg['software'], software_cfg['version'],\n software_cfg['tag'], software_cfg[\"job_id\"],\n software_cfg['matchup_dataset'])) + \".nc\"\n HarmonisationOutput.save(pjoin(output_dir, fname_output), save_residuals=res)\n\n print \"\\nOutput Data Written To:\"\n print \">\", output_dir", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * 
Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n ['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def read(lookup_cnfg, lookup_qn, diagram, T, directory, verbose=0):\n\n data = []\n comb = True if diagram == 'C4+D' else False\n\n for cnfg in lookup_cnfg:\n # filename and path\n filename = directory + '/' + diagram + '_cnfg%i' % cnfg + '.h5'\n try:\n fh = h5py.File(filename, \"r\")\n except IOError:\n print 'file %s not found' % filename\n raise\n\n # to achieve hirarchical indexing for quantum numbers build DataFrame for\n # each loop seperately\n # TODO: is it necessary to build that completely or can that be \n # constructed by successively storing each operator with pd.HDFStore()?\n data_qn = DataFrame()\n# print DataFrame(lookup_p)\n# print DataFrame(lookup_g)\n\n for op in lookup_qn.index:\n p = lookup_qn.ix[op, ['p_{so}', 'p_{si}']]\n g = lookup_qn.ix[op, ['\\gamma_{so}', '\\gamma_{si}']]\n groupname = set_groupname(diagram, p, g)\n\n # read data from file as numpy array and interpret as complex\n # numbers for easier treatment\n try:\n tmp = np.asarray(fh[groupname]).view(complex)\n except KeyError:\n print(\"could not read %s for config %d\" % (groupname, cnfg))\n continue\n\n # in case diagram is C4+D perform last mutliplication of factorizing\n # traces\n # the file contains 4 numbers per time slice: ReRe, ReIm, ImRe, and ImIm,\n # here combined 2 complex number\n if comb:\n # reshaping so we can extract the data easier\n tmp = tmp.reshape((-1,2))\n # extracting right combination, assuming ImIm contains only noise\n dtmp = 1.j * (tmp[:,1].real + tmp[:,0].imag) + tmp[:,0].real\n tmp = dtmp.copy()\n\n # save data into data frame\n data_qn[op] = pd.DataFrame(tmp, columns=['re/im'])\n data.append(data_qn)\n data = pd.concat(data, keys=lookup_cnfg, axis=0, names=['cnfg', 'T'])\n\n if verbose:\n print '\\tfinished reading'\n\n return data.sort_index(level=[0,1])", "def test_compute_Sv_ek80_CW_complex(ek80_path):\n ek80_raw_path = str(\n ek80_path.joinpath('ar2.0-D20201210-T000409.raw')\n ) # CW complex\n echodata = ep.open_raw(ek80_raw_path, sonar_model='EK80')\n ds_Sv = ep.calibrate.compute_Sv(\n echodata, waveform_mode='CW', encode_mode='complex'\n )\n assert isinstance(ds_Sv, xr.Dataset) is True\n ds_TS = ep.calibrate.compute_TS(\n echodata, waveform_mode='CW', encode_mode='complex'\n )\n assert isinstance(ds_TS, xr.Dataset) is True", "def test5(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('O[CH-][CH2-]','O[CH-][C-]=O')),\n ]\n for smi,matches in tgts:\n m = 
Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,1)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,1)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def test_header_update5(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"ocyw05afq_raw.fits\")\n self.get_data(\"input\", \"ocyw05afq_spt.fits\")\n\n capsys.readouterr()\n\n tastis('ocyw05afq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"ocyw05afq HST/STIS G430L 0.2X0.09 ACQ/PEAK-UP\\n\" \\\n \"prop: 14084 visit: 05 line: 2 target: BD-11D916\\n\" \\\n \"obs date, time: 2016-09-22 08:33:17 exposure time: 1.80\\n\" \\\n \"dom GS/FGS: S2AE000156F1 sub-dom GS/FGS: S2AE000086F2\\n\" \\\n \"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(1022,32) corner=(26,500)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Scan type: LINEARAXIS2 Step size (mas): 150\\n\" \\\n \"\\n\" \\\n \" [ 5139 67252 0]\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: 0.0 0.0 0.000 0.000 0.000 0.000\\n\" \\\n \"Flux in post-slew confirmation image (907707) - Pedestal (838752) = 68955 DN\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The confirmation image has a flux between 0.8 and 2.0 times the\\n\" \\\n \"maximum flux in the peakup, which is typical of a successful ACQ/PEAK.\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"ocyw05afq_raw.fits\", \"ocyw05afq_raw_ref.fits\")]\n self.compare_outputs(outputs)", "def test_sim(self):\n nxfe = 4\n ipopt = get_solver(\"ipopt\")\n\n m_steady = self.make_steady_model(nfe=nxfe)\n self.fix_model_inlets(m_steady, inlet_pressure=50.0 * pyo.units.bar)\n m_steady.fs.compressor.boost_pressure[:].fix(7.0 * pyo.units.bar)\n ipopt.solve(m_steady, tee=True)\n time_steady = m_steady.fs.time\n scalar_data = self.get_scalar_data_from_model(m_steady, time_steady)\n initial_data = self.get_data_from_model_at_time(m_steady, time_steady)\n\n m = pyo.ConcreteModel()\n default = {\n \"dynamic\": True,\n \"time_set\": [0.0, 20.0],\n \"time_units\": pyo.units.hr,\n }\n m.fs = idaes.FlowsheetBlock(**default)\n m.fs.properties = NaturalGasParameterBlock()\n pipeline_config = {\n \"property_package\": m.fs.properties,\n \"finite_elements\": nxfe,\n }\n m.fs.pipeline = GasPipeline(**pipeline_config)\n pipeline = m.fs.pipeline\n compressor_config = {\"property_package\": m.fs.properties}\n m.fs.compressor = Compressor(**compressor_config)\n compressor = m.fs.compressor\n m._compressor_to_pipeline = Arc(\n ports=(compressor.outlet_port, pipeline.inlet_port),\n )\n expand_arcs = pyo.TransformationFactory(\"network.expand_arcs\")\n expand_arcs.apply_to(m)\n\n cv = m.fs.pipeline.control_volume\n assert_units_consistent(m)\n\n disc = pyo.TransformationFactory(\"dae.finite_difference\")\n ntfe = 20\n disc.apply_to(m, nfe=ntfe, wrt=m.fs.time, scheme=\"BACKWARD\")\n\n time = m.fs.time\n t0 = m.fs.time.first()\n x0 = cv.length_domain.first()\n xf = cv.length_domain.last()\n j = next(iter(m.fs.properties.component_list))\n\n # 
Fix geometry variables\n m.fs.pipeline.diameter.fix(0.92 * pyo.units.m)\n cv.length.fix(300.0 * pyo.units.km)\n\n # Fix boost pressure\n compressor.boost_pressure[:].fix()\n\n # Inlets to the compressor are fixed, except for flow, where\n # the outlet is fixed.\n state = compressor.inlet_state\n state[:].pressure.fix()\n state[:].mole_frac_comp[j].fix()\n state[:].temperature.fix()\n cv.flow_mass[:, xf].fix()\n\n # Fix initial conditions. Here, pressure and volume for all\n # non-specified points.\n for x in cv.length_domain:\n if x != x0:\n cv.pressure[t0, x].fix()\n if x != xf:\n cv.flow_mass[t0, x].fix()\n\n # I want to deactivate differential equations at (t0, xf)\n # Material balance already doesn't exist here.\n cv.momentum_balance[t0, xf].deactivate()\n\n self.assertEqual(degrees_of_freedom(m), 0)\n\n # Load initial steady state into model at all time points.\n for name, val in initial_data.items():\n var = m.find_component(name)\n for t in time:\n var[t].set_value(val)\n # Load scalar data from initial steady state\n # (initialize area, basically)\n for name, val in scalar_data.items():\n var = m.find_component(name)\n var.set_value(val)\n\n cv.material_accumulation[...].set_value(0.0)\n cv.flow_mass_dt[...].set_value(0.0)\n\n for con in large_residuals_set(m):\n resid = pyo.value(con.body - con.upper)\n print(resid, con.name)\n ipopt.solve(m, tee=True)\n\n # Load input sequence into model\n sample_points = [4.0, 20.0]\n input_name = \"fs.pipeline.control_volume.flow_mass[*,1.0]\"\n nominal_density = 0.72\n val = 12.0 * 1e6 / 24 * nominal_density # 12 (1e6 SCM)/day\n input_series_data = (\n sample_points,\n {input_name: [val, val]},\n )\n input_interval_data = interval_data_from_time_series(input_series_data)\n load_inputs_into_model(m, time, input_interval_data)\n # Solve with loaded inputs\n res = ipopt.solve(m, tee=True)\n self.assertIs(\n res.solver.termination_condition,\n pyo.TerminationCondition.optimal,\n )\n\n # These predicted values come from a simulation of a single pipeline\n # model from the Pyomo DAE example. flow_mass has been converted\n # to kg/hr from (1e4 SCM/hr) by a factor of 0.72*1e4, where\n # 0.72 kg/m**3 is the gas density at standard conditions.\n pred_values = (\n list(time),\n {\n \"fs.pipeline.control_volume.flow_mass[*,%s]\"\n % x0: [\n 3.000e5,\n 2.999e5,\n 2.999e5,\n 2.999e5,\n 3.000e5,\n 3.174e5,\n 3.301e5,\n 3.389e5,\n 3.449e5,\n 3.492e5,\n 3.523e5,\n 3.544e5,\n 3.560e5,\n 3.571e5,\n 3.579e5,\n 3.585e5,\n 3.589e5,\n 3.592e5,\n 3.594e5,\n 3.595e5,\n 3.597e5,\n ],\n \"fs.pipeline.control_volume.pressure[*,%s]\"\n % xf: [\n 50.90,\n 50.90,\n 50.90,\n 50.90,\n 50.90,\n 49.83,\n 49.31,\n 48.95,\n 48.69,\n 48.51,\n 48.38,\n 48.29,\n 48.22,\n 48.17,\n 48.14,\n 48.11,\n 48.10,\n 48.08,\n 48.07,\n 48.07,\n 48.06,\n ],\n \"fs.compressor.power[*]\": [\n 1.590e3,\n 1.590e3,\n 1.590e3,\n 1.590e3,\n 1.590e3,\n 1.682e3,\n 1.750e3,\n 1.796e3,\n 1.828e3,\n 1.851e3,\n 1.867e3,\n 1.878e3,\n 1.887e3,\n 1.892e3,\n 1.897e3,\n 1.900e3,\n 1.902e3,\n 1.904e3,\n 1.905e3,\n 1.906e3,\n 1.906e3,\n ],\n },\n )\n output_names = [\n \"fs.pipeline.control_volume.flow_mass[*,%s]\" % x0,\n \"fs.pipeline.control_volume.pressure[*,%s]\" % xf,\n \"fs.compressor.power[*]\",\n ]\n actual_values = (\n list(time),\n {\n name: [var.value for var in m.find_component(name).values()]\n for name in output_names\n },\n )\n # Note: We fail with a reltol of 0.01, due to flow rate discrepancies\n # in positions 6, 7, 8, and 9. 
A reltol of 0.02 seems reasonable to me.\n self.assertStructuredAlmostEqual(pred_values, actual_values, reltol=0.02)", "def loadData(fname='Unstra.out2.00008.athdf'):\n #data=ath.athdf(fname,quantities=['B1','B2','B3'])\n time,data=ath.athdf(fname,quantities=['Bcc1'])\n bx = data['Bcc1']\n time,data=ath.athdf(fname,quantities=['Bcc2'])\n by = data['Bcc2']\n time,data=ath.athdf(fname,quantities=['Bcc3'])\n bz = data['Bcc3']\n x = data['x1f']\n y = data['x2f']\n z = data['x3f']\n # refinement\n rfac = 1.0\n ##if bx.shape[0] < 512:\n ## nz,ny,nx = bx.shape\n ## rfac = int(512/bx.shape[0])\n ## bx = np.repeat(bx,rfac,axis=0)\n ## bx = np.repeat(bx,rfac,axis=1)\n ## bx = np.repeat(bx,rfac,axis=2)\n ## by = np.repeat(by,rfac,axis=0)\n ## by = np.repeat(by,rfac,axis=1)\n ## by = np.repeat(by,rfac,axis=2)\n ## bz = np.repeat(bz,rfac,axis=0)\n ## bz = np.repeat(bz,rfac,axis=1)\n ## bz = np.repeat(bz,rfac,axis=2)\n # ---\n def curl(vx,vy,vz,dx,dy,dz):\n [dzvx,dyvx,dxvx] = np.gradient(vx)\n [dzvy,dyvy,dxvy] = np.gradient(vy)\n [dzvz,dyvz,dxvz] = np.gradient(vz)\n cx = dyvz/dy-dzvy/dz\n cy = dzvx/dz-dxvz/dx\n cz = dxvy/dx-dyvx/dy\n # No need to del the reference by one manually\n # allow python to perform its own garbage collection\n # after the function return cxyz\n #del dzvx\n #del dzvy\n #del dzvz\n return cx,cy,cz\n # ---\n dx = dz = (x[1]-x[0])/rfac\n dy = (y[1]-y[0])/rfac\n jx,jy,jz = curl(bx,by,bz,dx,dy,dz)\n j2 = jx**2+jy**2+jz**2\n return j2", "def test_radar_request_site_historic_sweep_pcp_v_hdf5_yesterday(default_settings):\n\n timestamp = dt.datetime.utcnow() - dt.timedelta(days=1)\n\n request = DwdRadarValues(\n parameter=DwdRadarParameter.SWEEP_PCP_VELOCITY_H,\n start_date=timestamp,\n site=DwdRadarSite.BOO,\n fmt=DwdRadarDataFormat.HDF5,\n subset=DwdRadarDataSubset.SIMPLE,\n settings=default_settings,\n )\n results = list(request.query())\n\n if len(results) == 0:\n raise pytest.skip(\"Data currently not available\")\n\n # Verify number of elements.\n assert len(results) == 1\n\n # Get payload.\n buffer = results[0][1]\n payload = buffer.getvalue()\n\n # Verify data.\n assert payload.startswith(b\"\\x89HDF\\r\\n\")\n\n # Verify more details.\n # h5dump ras07-stqual-pcpng01_sweeph5onem_vradh_00-2020093000403400-boo-10132-hd5\n\n hdf = h5py.File(buffer, \"r\")\n\n assert hdf[\"/how/radar_system\"] is not None\n assert hdf[\"/how\"].attrs.get(\"task\") == b\"Sc_Pcp-NG-01_BOO\"\n assert hdf[\"/what\"].attrs.get(\"source\") == b\"WMO:10132,NOD:deboo\"\n\n assert hdf[\"/how\"].attrs.get(\"scan_count\") == 1\n assert hdf[\"/dataset1/how\"].attrs.get(\"scan_index\") == 1\n\n assert hdf[\"/dataset1/data1/data\"].shape in ((360, 600), (359, 600), (358, 600), (357, 600))\n\n timestamp = round_minutes(request.start_date, 5)\n assert hdf[\"/what\"].attrs.get(\"date\") == bytes(timestamp.strftime(\"%Y%m%d\"), encoding=\"ascii\")\n assert hdf[\"/what\"].attrs.get(\"time\").startswith(bytes(timestamp.strftime(\"%H%M\"), encoding=\"ascii\"))", "def test_Lueker_Data_Csys(self):\n ld = pd.read_csv(\n \"tests/test_data/Lueker2000/Lueker2000_Table3.csv\", comment=\"#\"\n )\n\n # Calculate using cbsys\n # TA from DIC and fCO2\n cTA = Csys(\n DIC=ld.DIC.values,\n fCO2=ld.fCO2.values,\n T_in=ld.Temp.values,\n S_in=ld.Sal.values,\n )\n dTA = ld.TA - cTA.TA\n dTA_median = np.median(dTA)\n dTA_pc95 = np.percentile(dTA, [2.5, 97.5])\n self.assertLessEqual(abs(dTA_median), 2.5, msg=\"TA Offset <= 2.5\")\n self.assertTrue(all(abs(dTA_pc95 - dTA_median) <= 16), msg=\"TA 95% Conf <= 16\")\n\n # fCO2 from TA and DIC\n 
cfCO2 = Csys(\n TA=ld.TA.values, DIC=ld.DIC.values, T_in=ld.Temp.values, S_in=ld.Sal.values\n )\n dfCO2 = ld.fCO2 - cfCO2.fCO2\n dfCO2_median = np.median(dfCO2)\n # dfCO2_pc95 = np.percentile(dfCO2, [2.5, 97.5])\n dfCO2_percent_offset = 100 * dfCO2 / ld.fCO2\n self.assertLessEqual(dfCO2_median, 2.5, msg=\"fCO2 Offset <= 2.5\")\n self.assertLessEqual(np.std(dfCO2_percent_offset), 3, msg=\"fCO2 STD within 3%\")\n # print(dfCO2_pc95)\n # self.assertTrue(all(abs(dfCO2_pc95) <= 70), msg='fCO2 95% Conc <= 70')\n\n # DIC from TA and fCO2\n cDIC = Csys(\n TA=ld.TA.values,\n fCO2=ld.fCO2.values,\n T_in=ld.Temp.values,\n S_in=ld.Sal.values,\n )\n dDIC = ld.DIC - cDIC.DIC\n dDIC_median = np.median(dDIC)\n dDIC_pc95 = np.percentile(dDIC, [2.5, 97.5])\n self.assertLessEqual(abs(dDIC_median), 2, msg=\"DIC Offset <= 2\")\n self.assertTrue(all(abs(dDIC_pc95) <= 15), msg=\"DIC 95% Conc <= 15\")\n\n return", "def test_sky_coord_classic(self):\n sc_cen = SkyCoord(SCS_CENTER)\n tab = conesearch.conesearch(\n sc_cen, SCS_RADIUS, catalog_db=self.url, verbose=self.verbose)\n assert len(tab) > 0", "def test_GLODAPv2_Csys(self):\n # load GLODAP data\n gd = pd.read_csv(\"tests/test_data/GLODAP_data/GLODAPv2_pH_DIC_ALK_subset.csv\")\n gd.dropna(\n subset=[\n \"phtsinsitutp\",\n \"temperature\",\n \"salinity\",\n \"tco2\",\n \"talk\",\n \"pressure\",\n \"phosphate\",\n \"silicate\",\n ],\n inplace=True,\n )\n gd.pressure /= 10 # convert pressure to bar\n\n # set negative nutrient values to zero\n gd.phosphate[gd.phosphate < 0] = 0\n gd.silicate[gd.silicate < 0] = 0\n \n # exclude weird cruise 270 data\n gd = gd.loc[gd.cruise != 270]\n\n # calculate pH from TA and DIC\n cpH = Csys(\n TA=gd.talk,\n DIC=gd.tco2,\n T_in=gd.temperature,\n S_in=gd.salinity,\n P_in=gd.pressure,\n PT=gd.phosphate,\n SiT=gd.silicate,\n BT=415.7,\n )\n pH_resid = gd.phtsinsitutp - cpH.pHtot\n pH_median = np.median(pH_resid)\n pH_pc95 = np.percentile(pH_resid, [2.5, 97.5])\n\n self.assertLessEqual(abs(pH_median), 0.005, msg=\"pH Offset <= 0.005\")\n self.assertTrue(all(abs(pH_pc95) <= 0.05), msg=\"pH 95% Conf <= 0.05\")\n\n # calculate TA from pH and DIC\n cTA = Csys(\n pHtot=gd.phtsinsitutp,\n DIC=gd.tco2,\n T_in=gd.temperature,\n S_in=gd.salinity,\n P_in=gd.pressure,\n PT=gd.phosphate,\n SiT=gd.silicate,\n BT=415.7,\n )\n TA_resid = gd.talk - cTA.TA\n TA_median = np.median(TA_resid)\n TA_pc95 = np.percentile(TA_resid, [2.5, 97.5])\n\n self.assertLessEqual(abs(TA_median), 0.5, msg=\"TA Offset <= 0.5\")\n self.assertTrue(all(abs(TA_pc95) < 13), msg=\"TA 95% Conf <= 13\")\n\n # calculate DIC from TA and pH\n cDIC = Csys(\n pHtot=gd.phtsinsitutp,\n TA=gd.talk,\n T_in=gd.temperature,\n S_in=gd.salinity,\n P_in=gd.pressure,\n PT=gd.phosphate,\n SiT=gd.silicate,\n BT=415.7,\n )\n DIC_resid = gd.tco2 - cDIC.DIC\n DIC_median = np.median(DIC_resid)\n DIC_pc95 = np.percentile(DIC_resid, [2.5, 97.5])\n\n self.assertLessEqual(abs(DIC_median), 0.5, msg=\"DIC Offset <= 0.5\")\n self.assertTrue(all(abs(DIC_pc95) < 13), msg=\"DIC 95% Conf <= 13\")\n\n return", "def create_output_database():\n\n# Do not alter the hdf5 file if it already exists\n if os.path.exists(database_path):\n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" already exists and is ready to store the results of computations\")\n return None\n# Create hdf5 file. 
The flag \"-w\" means \"create file, fail if exists\" \n else:\n computations_database = h5py.File(database_path, \"w-\")\n\n# Create initial data datasets and write initial data into them \n for initial_condition in initial_conditions:\n for k in range (6,17):\n dataset_initial_path = initial_condition + \"/k = \" + str(k) + \" initial_data\"\n computations_database[dataset_initial_path] = initial_data(initial_condition, k)\n# Create data groups for storing the results of computations \n for flux in fluxes: \n group_path = initial_condition + \"/\" + flux\n computations_database.create_group(group_path)\n\n# Write the appropriate attributes that are needed for particular computations, \n# i.e. create the appropriate environment for each computational method \n computations_database[group_path].attrs[\"a\"] = 3.0\n computations_database[group_path].attrs[\"T\"] = 9.0\n if flux == \"Lax_Wendroff_Fourth_Order\": \n computations_database[group_path].attrs[\"CFL\"] = 0.2\n elif flux in [\"Fromm_CFL_0.5\", \"Fromm_van_Leer_CFL_0.5\"]:\n computations_database[group_path].attrs[\"CFL\"] = 0.5\n else:\n computations_database[group_path].attrs[\"CFL\"] = 0.9\n \n computations_database.close() \n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" has been created and is ready to store the results of computations\")", "def ncwrt_retrieval_config( retr_setup, outname=None ):\n\n #-- set name of file to be generated\n act_outname = outname if outname!=None else 'retrconfig.nc'\n msg = \"Start writing configuration file ***{}***...\".format(act_outname)\n FileLogger.info(msg)\n\n\n #-- compression settings\n zlev = retr_setup.zlev\n use_zlib = retr_setup.use_zlib\n\n #--\n schedule_dct = retr_setup.schedule_dct\n statevector = retr_setup.prstate\n #-- turn list into array\n sim_typ = np.array(schedule_dct['sim_typ'], dtype=np.int32)\n timepts = schedule_dct['date_utc']\n nstvar,npts = statevector.shape\n #-- overpass geometries SZA,SAA,VZA,VAA\n ivgeom = np.empty((npts,4), dtype=np.float64)\n ivgeom[:,0] = schedule_dct['sza']\n ivgeom[:,1] = schedule_dct['saa']\n ivgeom[:,2] = schedule_dct['vza']\n ivgeom[:,3] = schedule_dct['vaa']\n\n #-- temporal settings, create time-values (time-since)\n time_start, time_end = datelst_get_month_aligned_bounds(timepts)\n time_coverage_start = time_start.strftime('%Y-%m-%dT%H:%M:%S')\n time_coverage_end = time_end.strftime('%Y-%m-%dT%H:%M:%S')\n ref_time = dt.datetime(timepts[0].year,1,1) #January1st in year of first point in time\n time_unit = 'seconds since {}'.format(ref_time.strftime('%Y-%m-%dT%H:%M:%S'))\n time_values = nc4.date2num(timepts, time_unit)\n\n #-- ensure directory exists\n mkdirp_smart(os.path.dirname(act_outname))\n\n #-- open file pointer\n ncfp = nc4.Dataset(act_outname, 'w')\n #-- add dimensions\n d1 = ncfp.createDimension('npoints',npts)\n d2 = ncfp.createDimension('ngeo',4)\n\n #-- time-value\n ncvar = ncfp.createVariable( 'time', statevector.dtype, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('standard_name','time')\n ncvar.setncattr('long_name','time')\n ncvar.setncattr('units', time_unit)\n ncvar[:] = time_values[:]\n\n #-- simulation type\n ncvar = ncfp.createVariable( 'sim_typ', sim_typ.dtype, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar[:] = sim_typ[:]\n ncvar.setncattr('long_name','simulation_type')\n ncvar.setncattr('comment', 'integer value which is to be bit-interpreted')\n ncvar.setncattr('nobits_set', 'time-point with other state')\n ncvar.setncattr('bit0_is_set', 'time-point for S1 
simulation')\n ncvar.setncattr('bit1_is_set', 'time-point for S2 simulation')\n ncvar.setncattr('bit2_is_set', 'time-point for S1A simulation')\n ncvar.setncattr('bit3_is_set', 'time-point for S1B simulation')\n ncvar.setncattr('bit4_is_set', 'time-point for S2A simulation')\n ncvar.setncattr('bit5_is_set', 'time-point for S2B simulation')\n \n #-- illumination-view geometry\n ncvar = ncfp.createVariable( 'ivgeom', ivgeom.dtype, ('npoints','ngeo'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('sza','igeo: 0')\n ncvar.setncattr('saa','igeo: 1')\n ncvar.setncattr('vza','igeo: 2')\n ncvar.setncattr('vaa','igeo: 3')\n ncvar[:,:] = ivgeom[:,:]\n \n #-- global attributes\n ncfp.setncattr('creator_name',\"The Inversion Lab, Hamburg, Germany\")\n ncfp.setncattr('creator_email', \"Michael.Vossbeck(at)Inversion-Lab.com\")\n ncfp.setncattr('netcdf_libversion',\"{}\".format(nc4.__netcdf4libversion__))\n ncfp.setncattr('date_created',\"{}\".format(dt.datetime.utcnow().isoformat()))\n ncfp.setncattr('time_coverage_start',time_coverage_start)\n ncfp.setncattr('time_coverage_end',time_coverage_end)\n\n #-- close file pointer\n ncfp.close()\n\n # logging\n msg = \"...writing ***{}*** DONE\".format(act_outname)\n FileLogger.info(msg)", "def test_NeuroPath1(self):\n \n self.delayDisplay(\"Starting the test\")\n #\n # first, get some data\n #\n import urllib\n downloads = (\n ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),\n )\n \n for url,name,loader in downloads:\n filePath = slicer.app.temporaryPath + '/' + name\n if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:\n logging.info('Requesting download %s from %s...\\n' % (name, url))\n urllib.urlretrieve(url, filePath)\n if loader:\n logging.info('Loading %s...' 
% (name,))\n loader(filePath)\n self.delayDisplay('Finished with download and loading')\n \n volumeNode = slicer.util.getNode(pattern=\"FA\")\n logic = NeuroPathLogic()\n self.assertTrue( logic.hasImageData(volumeNode) )\n self.delayDisplay('Test passed!')", "def test_run_read(self):\n\n self.ictrl[0] = 1 + 2 + 4 + 8\n vmec_f90wrap.runvmec(self.ictrl, self.filename, self.verbose, \\\n self.fcomm, reset_file)\n\n self.assertTrue(self.ictrl[1] in success_codes)\n\n self.assertEqual(vmec_f90wrap.vmec_input.nfp, 3)\n self.assertEqual(vmec_f90wrap.vmec_input.mpol, 4)\n self.assertEqual(vmec_f90wrap.vmec_input.ntor, 3)\n print('rbc.shape:', vmec_f90wrap.vmec_input.rbc.shape)\n print('rbc:',vmec_f90wrap.vmec_input.rbc[101:103, 0:4])\n\n # n = 0, m = 0:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.rbc[101,0], 1.3782)\n\n # n = 0, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[101,1], 4.6465E-01)\n\n # n = 1, m = 1:\n self.assertAlmostEqual(vmec_f90wrap.vmec_input.zbs[102,1], 1.6516E-01)\n\n # Now try reading in the output\n wout_file = os.path.join(os.path.dirname(__file__), 'wout_li383_low_res.nc')\n ierr = 0\n vmec_f90wrap.read_wout_mod.read_wout_file(wout_file, ierr)\n self.assertEqual(ierr, 0)\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.betatot, \\\n 0.0426215030653306, places=4)\n\n print('iotaf.shape:',vmec_f90wrap.read_wout_mod.iotaf.shape)\n print('rmnc.shape:',vmec_f90wrap.read_wout_mod.rmnc.shape)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.iotaf[-1], \\\n 0.654868168783638, places=4)\n\n self.assertAlmostEqual(vmec_f90wrap.read_wout_mod.rmnc[0, 0], \\\n 1.4773028173065, places=4)", "def test_h5file(dl2_shower_geometry_file):\n from ctapipe.io.tableloader import TableLoader\n\n # no input raises error\n with pytest.raises(ValueError):\n with TableLoader():\n pass\n\n # test we can use an already open file\n with tables.open_file(dl2_shower_geometry_file, mode=\"r+\") as h5file:\n with TableLoader(h5file=h5file) as loader:\n assert 25 in loader.subarray.tel\n loader.read_subarray_events()\n loader.read_telescope_events()", "def _write_nemo_hr_file(rpn_hr_ds_path, nemo_hr_ds_path):\n with xarray.open_dataset(rpn_hr_ds_path) as rpn_hr:\n logging.debug(\n f\"calculating specific humidity & incoming longwave radiation from {rpn_hr_ds_path}\"\n )\n qair, ilwr, rh = _calc_qair_ilwr(rpn_hr)\n u_out, v_out = _rotate_winds(rpn_hr)\n data_vars = {\n \"nav_lon\": rpn_hr.nav_lon,\n \"nav_lat\": rpn_hr.nav_lat,\n # [:, 0] drops z dimension that NEMO will not tolerate\n \"qair\": qair[:, 0],\n \"RH_2maboveground\": rh[:, 0],\n \"therm_rad\": ilwr[:, 0],\n \"u_wind\": u_out[:, 0],\n \"v_wind\": v_out[:, 0],\n # \"LHTFL_surface\": ** needs to be calculated**,\n }\n nemo_rpn_vars = (\n (\"atmpres\", \"PN\"),\n (\"percentcloud\", \"NT\"),\n (\"PRATE_surface\", \"RT\"),\n (\"precip\", \"PR\"),\n (\"solar\", \"FB\"),\n (\"tair\", \"TT\"),\n )\n missing_vars = \"\"\n for nemo_var, rpn_var in nemo_rpn_vars:\n try:\n # [:, 0] drops z dimension that NEMO will not tolerate\n data_vars.update({nemo_var: getattr(rpn_hr, rpn_var)[:, 0]})\n except AttributeError:\n # Variable is missing from RPN dataset, so provide a placeholder DataArray\n # full of NaNs that we will deal with later via interpolation\n data_vars.update(\n {nemo_var: xarray.DataArray(numpy.full_like(qair[:, 0], numpy.nan))}\n )\n missing_vars = (\n \", \".join((missing_vars, nemo_var)) if missing_vars else nemo_var\n )\n logging.warning(f\"missing RPN variable {rpn_var} from {rpn_hr_ds_path}\")\n nemo_hr = 
xarray.Dataset(\n data_vars=data_vars, coords=rpn_hr.coords, attrs=rpn_hr.attrs\n )\n nemo_hr.attrs[\"history\"] += (\n f\"\\n{arrow.now().format('ddd MMM DD HH:mm:ss YYYY')}: \"\n f\"Add specific and relative humidity and incoming longwave radiation variables from \"\n f\"correlations\"\n )\n if missing_vars:\n nemo_hr.attrs[\"missing_variables\"] = missing_vars\n _add_vars_metadata(nemo_hr)\n _write_netcdf_file(nemo_hr, nemo_hr_ds_path)", "def test_imsim():\n import yaml\n import astropy.units as u\n import matplotlib.pyplot as plt\n from tqdm import tqdm\n # Need these for `eval` below\n from numpy import array\n import coord\n\n with open(DATA_DIR / \"wcs_466749.yaml\", 'r') as f:\n wcss = yaml.safe_load(f)\n\n cmds = {}\n with open(DATA_DIR / \"phosim_cat_466749.txt\", 'r') as f:\n for line in f:\n k, v = line.split()\n try:\n v = int(v)\n except ValueError:\n try:\n v = float(v)\n except ValueError:\n pass\n cmds[k] = v\n\n # Values below (and others) from phosim_cat_466749.txt\n rc = cmds['rightascension']\n dc = cmds['declination']\n boresight = galsim.CelestialCoord(\n rc*galsim.degrees,\n dc*galsim.degrees\n )\n obstime = Time(cmds['mjd'], format='mjd', scale='tai')\n obstime -= 15*u.s\n band = \"ugrizy\"[cmds['filter']]\n wavelength_dict = dict(\n u=365.49,\n g=480.03,\n r=622.20,\n i=754.06,\n z=868.21,\n y=991.66\n )\n wavelength = wavelength_dict[band]\n camera = imsim.get_camera()\n\n rotTelPos = cmds['rottelpos'] * galsim.degrees\n telescope = imsim.load_telescope(f\"LSST_{band}.yaml\", rotTelPos=rotTelPos)\n # Ambient conditions\n # These are a guess.\n temperature = 293.\n pressure = 69.0\n H2O_pressure = 1.0\n\n # Start by constructing a refractionless factory, which we can use to\n # cross-check some of the other values in the phosim cmd file.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=0.0,\n H2O_pressure=H2O_pressure\n )\n\n aob, zob, hob, dob, rob, eo = factory._ICRF_to_observed(\n boresight.ra.rad, boresight.dec.rad, all=True\n )\n np.testing.assert_allclose(\n np.rad2deg(aob)*3600, cmds['azimuth']*3600,\n rtol=0, atol=2.0\n )\n np.testing.assert_allclose(\n (90-np.rad2deg(zob))*3600, cmds['altitude']*3600,\n rtol=0, atol=6.0,\n )\n q = factory.q * galsim.radians\n rotSkyPos = rotTelPos - q\n # Hmmm.. Seems like we ought to be able to do better than 30 arcsec on the\n # rotator? Maybe this is defined at a different point in time? Doesn't seem\n # to affect the final WCS much though.\n np.testing.assert_allclose(\n rotSkyPos.deg*3600, cmds['rotskypos']*3600,\n rtol=0, atol=30.0,\n )\n\n # We accidentally simulated DC2 with the camera rotated 180 degrees too far.\n # That includes the regression test data here. So to fix the WCS code, but\n # still use the same regression data, we need to add 180 degrees here. 
Just\n # rotate the camera by another 180 degrees\n telescope = telescope.withLocallyRotatedOptic(\n \"LSSTCamera\", batoid.RotZ(np.deg2rad(180))\n )\n\n # For actual WCS check, we use a factory that _does_ know about refraction.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=pressure,\n H2O_pressure=H2O_pressure\n )\n\n do_plot = False\n my_centers = []\n imsim_centers = []\n if do_plot:\n _, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 12))\n i = 0\n r1 = []\n d1 = []\n r2 = []\n d2 = []\n rng = np.random.default_rng(1234)\n for k, v in tqdm(wcss.items()):\n name = k[18:25].replace('-', '_')\n det = camera[name]\n cpix = det.getCenter(cameraGeom.PIXELS)\n\n wcs = factory.getWCS(det, order=2)\n wcs1 = eval(v)\n # Need to adjust ab parameters to new GalSim convention\n wcs1.ab[0,1,0] = 1.0\n wcs1.ab[1,0,1] = 1.0\n\n my_centers.append(wcs.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n imsim_centers.append(wcs1.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n\n corners = det.getCorners(cameraGeom.PIXELS)\n xs = np.array([corner.x for corner in corners])\n ys = np.array([corner.y for corner in corners])\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n if i == 0:\n labels = ['batoid', 'PhoSim']\n else:\n labels = [None]*2\n if do_plot:\n ax.plot(ra1, dec1, c='r', label=labels[0])\n ax.plot(ra2, dec2, c='b', label=labels[1])\n\n # add corners to ra/dec check lists\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n # Add some random points as well\n xs = rng.uniform(0, 4000, 100)\n ys = rng.uniform(0, 4000, 100)\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n i += 1\n\n if do_plot:\n ax.legend()\n xlim = ax.get_xlim()\n ax.set_xlim(xlim[1], xlim[0])\n plt.show()\n\n dist = sphere_dist(r1, d1, r2, d2)\n print(\"sphere dist mean, max, std\")\n print(\n np.rad2deg(np.mean(dist))*3600,\n np.rad2deg(np.max(dist))*3600,\n np.rad2deg(np.std(dist))*3600,\n )\n np.testing.assert_array_less(\n np.rad2deg(np.mean(dist))*3600,\n 5.0\n )\n if do_plot:\n plt.hist(np.rad2deg(dist)*3600, bins=100)\n plt.show()\n\n if do_plot:\n r1 = np.array([c.ra.rad for c in my_centers])\n d1 = np.array([c.dec.rad for c in my_centers])\n r2 = np.array([c.ra.rad for c in imsim_centers])\n d2 = np.array([c.dec.rad for c in imsim_centers])\n cd = np.cos(np.deg2rad(cmds['declination']))\n q = plt.quiver(r1, d1, np.rad2deg(r1-r2)*3600*cd, np.rad2deg(d1-d2)*3600)\n plt.quiverkey(q, 0.5, 1.1, 5.0, \"5 arcsec\", labelpos='E')\n plt.show()", "def write_cfradial(output_filename, start_dt, minutes, timeres_s = 5, \n altres_m =60, maxtimeslice_td=datetime.timedelta(seconds=30*60), \n instrument='gvhsrl', min_alt_m=0, max_alt_m=5000,store_calibrations=False):\n\n cdl = locate_file('hsrl_cfradial.cdl', forModule=lgtb)\n print 'CDL = ', cdl\n timeres_td = datetime.timedelta(seconds=timeres_s)\n\n netcdf = Dataset(output_filename, 'w', clobber=True)\n delta = datetime.timedelta(minutes=minutes)\n timeres_delta = datetime.timedelta(seconds=timeres_s)\n end_dt = start_dt + delta\n\n gen = dpl_hsrl(instrument)\n\n if store_calibrations: # to store calibrations, newer actors are needed, as well as the precall methods (FIXME better design)\n import maestro.netcdf_precall as npc\n args=[]\n 
kwargs=dict(output=netcdf,template=cdl,usecfradial=True,basetime=start_dt)\n x=npc.addConstantsToParms(npc.addCalibrationsToNetCDF())\n hsrlnar=gen(start_dt, end_dt, timeres_timedelta=timeres_delta, min_alt_m=min_alt_m, max_alt_m=max_alt_m, altres_m=altres_m)\n x(hsrlnar,args,kwargs)\n nar=artists.dpl_netcdf_artist(hsrlnar,*args,**kwargs)\n #framestream,template,outputfilename=None,format=None,usecfradial=None,selected_bindings=None,output=None,forModule=None,withUnlimited=None,basetime=None,addAttributes={}):\n for x in nar:\n pass\n else:\n v = None\n try:\n # store each lidar record\n for tzg in gen(start_dt, end_dt, timeres_timedelta=timeres_delta, min_alt_m=min_alt_m, max_alt_m=max_alt_m, altres_m=altres_m):\n if v == None:\n v = cfr.DplCreateCfradial(cdl, netcdf, tzg)\n v.append_data(tzg)\n\n v.close()\n\n except RuntimeError, msg:\n print msg\n traceback.print_exc()\n print 'write_cfradial: could not process data for %s starting at %s' % \\\n (instrument, start_dt.strftime('%Y-%m-%d %H:%M:%S'))", "def read_dr3_spectrum(path, common_dispersion=None, bounds_error=False):\n\n header_keys = (\"helio_rv\", \"z\", \"z_err\")\n\n with fits.open(path) as image:\n # data array indices:\n # flux, inverse variance, wavelength, andmask, ormask.\n flux, ivar, dispersion, and_mask, or_mask = image[0].data\n\n # Create a meta dictionary that contains things we will probably care \n # about later on, and the path so that we can trace provenance of other\n # things as needed.\n meta = dict(path=path)\n for header_key in header_keys:\n meta[header_key] = image[0].header[header_key.upper()]\n\n # Use the OR mask to set the inverse variances to zero for any pixels with\n # indications of being bad. For example, the bit mask meanings are:\n # 1 : BADCCD : bad pixel on CCD\n # 2 : BADPROFILE : bad profile in extraction\n # 3 : NOSKY : no sky information at this wavelength\n # 4 : BRIGHTSKY : sky level too high\n # 5 : BADCENTER : fibre trace out of the CCD\n # 6 : NODATA : no good data.\n\n # From http://dr3.lamost.org/doc/data-production-description\n\n # These are all bad things. And the LAMOST pipeline people are more familiar\n # with the data than we are. 
So let's believe them.\n\n rest_dispersion = dispersion * (1 - meta[\"z\"])\n ivar[or_mask > 0] = 0.0\n\n if common_dispersion is not None:\n flux = (interpolate.interp1d(rest_dispersion, flux,\n bounds_error=bounds_error, fill_value=1))(common_dispersion)\n ivar = (interpolate.interp1d(rest_dispersion, ivar,\n bounds_error=bounds_error, fill_value=0))(common_dispersion)\n\n rest_dispersion = common_dispersion\n ivar[ivar < 0] = 0\n\n assert np.all(ivar >= 0), \"negative inverse variances\"\n assert np.all(np.isfinite(flux)), \"non-finite fluxes\"\n\n return (rest_dispersion, flux, ivar, meta)", "def find_hrc_calib_obsid(inst):\n##\n##--- create a list of already processed data\n##\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/6* > '+ zspace\n# os.system(cmd)\n# with open(zspace, 'r') as f:\n# ftest = f.read()\n# wrd = str(inst) + '/61'\n# mc = re.search(wrd, ftest)\n# if mc is not None:\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/61* >' + zspace\n# os.system(cmd)\n#\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/62* >' + zspace\n# os.system(cmd)\n#\n# data = mcf.read_data_file(zspace, remove=1)\n# prev_list = []\n# for ent in data:\n# atemp = re.split('\\/', ent)\n# prev_list.append(int(float(atemp[-1])))\n#\n##\n##--- find today's date and set checking range for the last 30 days\n##\n# today = time.strftime('%Y:%j:%H:%M:%S', time.gmtime())\n# today = int(Chandra.Time.DateTime(today).secs)\n# start = today - 10 * 86400\n##\n##--- extract hrc obsid information\n##\n# line = 'operation=browse\\n'\n# line = line + 'dataset=flight\\n'\n# line = line + 'level=1\\n'\n# line = line + 'detector=hrc\\n'\n# line = line + 'filetype=evt1\\n'\n# line = line + 'tstart=' + str(start) + '\\n'\n# line = line + 'tstop=' + str(today) + '\\n'\n# line = line + 'go\\n'\n#\n# with open('zline', 'w') as fo:\n# fo.write(line)\n#\n# cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > ' + zspace\n# os.system(cmd)\n#\n# mcf.rm_files('./zline')\n#\n# data = mcf.read_data_file(zspace, remove=1)\n##\n##--- select obsids with 61* and 62* starting\n##\n# h_list = []\n# for ent in data:\n# mc = re.search('hrcf', ent)\n# if mc is not None:\n# atemp = re.split('hrcf', ent)\n# btemp = re.split('_', atemp[1])\n# obsid = int(float(btemp[0]))\n# if obsid > 61000 and obsid < 63000:\n##\n##--- if it is already observed skip it\n##\n# if obsid in prev_list:\n# continue\n##\n##--- check which instrument\n##\n# chk = check_inst(obsid)\n# if chk == inst:\n# h_list.append(obsid)\n\n\n\n h_list = ['62410', '62423', '62435', '62437', '62439', '62441', '62443', '62635', '62637', '62649', '62973', '62997', '62422', '62426', '62436', '62438', '62440', '62442', '62446', '62636', '62638', '62796', '62991']\n\n\n return h_list", "def HD_input_sncosmo_data(self, sn_list):\n\n dico = cPickle.load(open(SUGAR_parameter_pkl))\n self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_5_nomodelcov.txt')\n self.read_meta()\n self.read_snfit_results()\n Filtre = np.array([True]*len(self.sncosmo_sn_name))\n self.zcmb = []\n self.z_err = []\n for j, sn_name in enumerate(self.sncosmo_sn_name):\n# if self.sncosmo_sn_name[j] in dico.keys():\n#\n# for i in range (len(self.meta_sn_name_list)):\n# if self.sncosmo_sn_name[j] == self.meta_sn_name_list[i]:\n# \n# self.z_err.append(self.meta_zhl_err[i])\n# self.zcmb.append(self.meta_zcmb[i])\n# if np.abs(self.sncosmo_x1[j] - self.x1[i]) > 0.01:\n# i print 'problem with %s include in sample but big difference between sncosmo and snfit'%(self.sncosmo_sn_name[j])\n# else:\n# Filtre[j] = False\n if 
sn_name in sn_list:\n Filtre[j] = True\n else:\n Filtre[j] = False\n\n for p in dico.keys():\n if p not in self.sncosmo_sn_name:\n print p\n\n self.sncosmo_x1 = self.sncosmo_x1[Filtre]\n self.sncosmo_x1_err = self.sncosmo_x1_err[Filtre] \n self.sncosmo_c = self.sncosmo_c[Filtre]\n self.sncosmo_c_err = self.sncosmo_c_err[Filtre]\n self.sncosmo_mb = self.sncosmo_mb[Filtre]\n self.sncosmo_mb_err = self.sncosmo_mb_err[Filtre]\n self.sncosmo_cov_x1_c = self.sncosmo_cov_x1_c[Filtre]\n self.sncosmo_cov_mb_x1 = self.sncosmo_cov_mb_x1[Filtre]\n self.sncosmo_cov_mb_c = self.sncosmo_cov_mb_c[Filtre]\n self.sncosmo_z = self.sncosmo_z[Filtre]\n self.zcmb = np.array(self.zcmb)\n self.z_err = np.array(self.z_err)\n\n self.sncosmo_cov_y = np.zeros((len(self.sncosmo_mb)*3,len(self.sncosmo_mb)*3))\n \n for i in range (len(self.sncosmo_mb)):\n self.sncosmo_cov_y[i*3,i*3] = self.sncosmo_mb_err[i]**2\n self.sncosmo_cov_y[i*3+ 1,i*3+ 1] = self.sncosmo_x1_err[i]**2\n \n self.sncosmo_cov_y[i*3+ 2,i*3+ 2] = self.sncosmo_c_err[i]**2\n self.sncosmo_cov_y[i*3+ 0,i*3+ 1] = self.sncosmo_cov_mb_x1[i]\n self.sncosmo_cov_y[i*3+ 1,i*3+ 0] = self.sncosmo_cov_mb_x1[i]\n self.sncosmo_cov_y[i*3+ 0,i*3+ 2] = self.sncosmo_cov_mb_c[i]\n self.sncosmo_cov_y[i*3+ 2,i*3+ 0] = self.sncosmo_cov_mb_c[i]\n self.sncosmo_cov_y[i*3+ 1,i*3+ 2] = self.sncosmo_cov_x1_c[i] \n self.sncosmo_cov_y[i*3+ 2,i*3+ 1] = self.sncosmo_cov_x1_c[i] \n \n self.salt_parm = np.array([self.sncosmo_mb,self.sncosmo_x1,self.sncosmo_c]).T\n print len(self.salt_parm), len(self.sncosmo_cov_y), len(self.sncosmo_z), len(self.zcmb)\n return self.salt_parm, self.sncosmo_cov_y, self.sncosmo_z, self.zcmb, self.z_err", "def consolidate(max_rounds, int_fwm,master_index, index, filename = 'data_large'):\n\n\n layer_0 = '0/0'\n filepath = 'output{}/output{}/data/'.format(master_index, index)\n file_read = filepath + filename\n file_save = filepath + filename+'_conc'\n \n # Input data, small, no need to cons\n D = read_variables(file_read, '0/0')\n save_variables(file_save, 'input', **D)\n\n if max_rounds ==0:\n max_rounds +=1\n U_cons = np.zeros([4,max_rounds, 7*int_fwm.nt], dtype = np.complex128)\n # Reading of all the oscillating spectra and sending them to a 3D array\n unfortmated_string = '{}/{}/U'\n with h5py.File(file_read+'.hdf5', 'r') as f:\n for pop in range(1,5):\n for r in range(max_rounds):\n U_cons[pop - 1,r,:] = f.get(unfortmated_string.format(pop,r)).value\n save_variables(file_save, 'results', U = U_cons) \n os.system('mv '+file_save+'.hdf5 '+file_read+'.hdf5')\n return None", "def test_real_file(self):\n log.info('===== START TEST BYTE LOSS =====')\n\n # Recovered\n file_path = os.path.join(RESOURCE_PATH, '11079364_SNA_SNA.txt')\n\n stream_handle = open(file_path, MODE_ASCII_READ)\n\n self.create_parser(stream_handle, telem_flag=False)\n\n particles = self.parser.get_records(182)\n\n log.debug(\"*** test_real_file Num particles %s\", len(particles))\n\n # check all the values against expected results.\n self.assert_particles(particles, '11079364_SNA_SNA_recov.yml', RESOURCE_PATH)\n self.assertEquals(self.exception_callback_value, [])\n stream_handle.close()\n\n # Telemetered\n file_path = os.path.join(RESOURCE_PATH, '11079419_SNA_SNA.txt')\n\n stream_handle = open(file_path, MODE_ASCII_READ)\n\n self.create_parser(stream_handle)\n\n particles = self.parser.get_records(172)\n\n log.debug(\"*** test_real_file Num particles %s\", len(particles))\n\n # check all the values against expected results.\n self.assert_particles(particles, '11079419_SNA_SNA_telem.yml', 
RESOURCE_PATH)\n stream_handle.close()\n\n log.info('===== END TEST REAL FILE =====')", "def test_header_update8(self, capsys):\n\n # Prepare input files.\n self.get_data(\"input\", \"ocu252cmq_raw.fits\")\n self.get_data(\"input\", \"ocu252cmq_spt.fits\")\n\n capsys.readouterr()\n\n tastis('ocu252cmq_raw.fits', update=True)\n\n captured = capsys.readouterr()\n assert captured.out == \"===============================================================================\\n\" \\\n \"ocu252cmq HST/STIS MIRVIS F28X50OII ACQ/POINT\\n\" \\\n \"prop: 14143 visit: 52 line: 1 target: BD+41-3306\\n\" \\\n \"obs date, time: 2016-06-06 08:30:05 exposure time: 2.10\\n\" \\\n \"dom GS/FGS: N2JU001340F2 sub-dom GS/FGS: N2K1001229F1\\n\" \\\n \"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\\n\" \\\n \"subarray (axis1,axis2): size=(100,100) corner=(487,466)\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Coarse locate phase: Target flux in max checkbox (DN): 1442\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 527.8 513.1 41.8 48.1\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -7.9 -2.9 -0.400 -0.147 -0.387 -0.179\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Fine locate phase: Target flux in max checkbox (DN): 611\\n\" \\\n \"\\n\" \\\n \" global local\\n\" \\\n \" axis1 axis2 axis1 axis2\\n\" \\\n \"Target location: 534.1 516.1 48.1 51.1\\n\" \\\n \"Ref ap location: 537.5 516.5 19.5 16.5\\n\" \\\n \"\\n\" \\\n \" axis1 axis2 axis1 axis2 V2 V3\\n\" \\\n \" (pixels) (arcsec) (arcsec)\\n\" \\\n \"Estimated slew: -2.1 -0.4 -0.106 -0.020 -0.089 -0.061\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"Total est. slew: -10.0 -3.3 -0.506 -0.168 -0.477 -0.239\\n\" \\\n \"-------------------------------------------------------------------------------\\n\" \\\n \"The fluxes in the maximum checkbox in the fine and coarse stages differ\\n\" \\\n \"by more than 25%. 
This may indicate a problem with your acquisition.\\n\" \\\n \"\\n\" \\\n \"===============================================================================\\n\"\n\n # Compare results\n outputs = [(\"ocu252cmq_raw.fits\", \"ocu252cmq_raw_ref.fits\")]\n self.compare_outputs(outputs)", "def read(files, save):\n\t# NOTE all soundings are size obs long, they must be filled in with zeros for this data format...\n\t# create the HDF5 document\n\tdoc = h5(save)\n\tsize = 450 # this hopefully exceeds the size of the arrays # CPIN Files are much shorter...\n\tdoc.create(pres=size, temp=size, dewpt=size, rh=size, r=size, u=size, v=size, z=size, lat=1, lon=1, theta=size, thte=size,\n\t\twspd=size, wdir=size, gamma=size, stab=size, N=size, rich=size, thtdef=size, cpin=size)\n\t# those last two do not have to be included...\n\t# Z=geopotenital height\n\n\t# now read the files!\n\tfor f in sorted(files):\n\t\tfname = f.split('/')[-1]\n\t\t# if 'smth' not in fname and NCAR not in fname: continue\n\t\tl.info('reading ' + fname)\n\t\t# launch time comes from line 2 of the file, the last element\n\t\tdf = open(f, 'r')\n\t\ttxt = df.read(2000).split('\\n') # way more than we need\n\t\tdf.close()\n\t\tlatln = txt[0].split() # keys 1,2 will be what we want\n\t\ttry:\n\t\t\ttm = s2t(txt[1].split()[-1] + 'UTC', '%Y%m%d%H%M%Z')\n\t\texcept:\n\t\t\t# drat.\n\t\t\tprint txt.split('\\n')[1]\n\t\t\tcontinue\n\t\ttry:\n\t\t\tif 'cpin' in fname:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich, thtdef, cpin = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\t\telse:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\texcept:\n\t\t\tl.warning('This file could not be read')\n\t\t\tcontinue\n\n\t\t# and append this data! 
I will trust the time seconds, instead of recomputing the time\n\t\t# but, before that, we have to make them all the same size - size long\n\t\tnl = np.zeros(size - t.shape[0]) - 999.00 # -999 array to fluff the end\n\t\tp = np.concatenate((p, nl))\n\t\tt = np.concatenate((t, nl))\n\t\ttd = np.concatenate((td, nl))\n\t\trh = np.concatenate((rh, nl))\n\t\tr = np.concatenate((r, nl))\n\t\ttv = np.concatenate((tv, nl))\n\t\ttht = np.concatenate((tht, nl))\n\t\tthte = np.concatenate((thte, nl))\n\t\tws = np.concatenate((ws, nl))\n\t\twd = np.concatenate((wd, nl))\n\t\tgamma = np.concatenate((gamma, nl))\n\t\tstab = np.concatenate((stab, nl))\n\t\tN = np.concatenate((N, nl))\n\t\trich = np.concatenate((rich, nl))\n\t\tu = np.concatenate((u, nl))\n\t\tv = np.concatenate((v, nl))\n\t\tz = np.concatenate((z, nl))\n\t\tif 'cpin' in fname:\n\t\t\tcpin = np.concatenate((cpin, nl))\n\t\t\tthtdef = np.concatenate((thtdef, nl))\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich, cpin=cpin, thtdef=thtdef)\n\t\telse:\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich)\n\tdoc.close()", "def compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts(self):\n ref_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs\"\n \"/lake_analysis_one_21_Jun_2021\")\n data_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"\n \"lake_analysis_two_26_Mar_2022\")\n ref_filename=os.path.join(ref_base_dir,\n \"rivers/results/default_orog_corrs/diag_version_29_date_0_original_truesinks\",\n \"30min_flowtocell.nc\")\n data_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0_original_truesinks\",\n \"30min_flowtocell.nc\")\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_extract_ls_mask_from_corrected_HD_rdirs_20160504_142435.nc\")\n ref_catchment_filename=os.path.join(ref_base_dir,\n \"rivers/results/default_orog_corrs/diag_version_29_date_0_original_truesinks\",\n \"30min_catchments.nc\")\n data_catchment_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0_original_truesinks\",\n \"30min_catchments.nc\")\n ref_rdirs_filename=os.path.join(ref_base_dir,\n \"rivers/results/default_orog_corrs/\"\n \"diag_version_29_date_0_original_truesinks\",\n \"30min_rdirs.nc\")\n reference_rmouth_outflows_filename=os.path.join(ref_base_dir,\n \"rivers/results/default_orog_corrs/\"\n \"diag_version_29_date_0_original_truesinks\",\n \"30min_rmouth_flowtocell.nc\")\n data_rmouth_outflows_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0_original_truesinks\",\n \"30min_rmouth_flowtocell.nc\")\n #glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n 
minflowcutoff=80,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='default',\n catchment_and_outflows_mods_list_filename=\\\n None,\n #additional_matches_list_filename=\\\n #\"additional_matches_ice6g_vs_ice5g_lgm.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='HD')", "def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)", "def retrieve_cloudmask(\n self, output_binary=True, include_thermal_test=True, include_channel_r5=True\n ):\n\n # Read visual near infrared (VNIR) channels at 15m resolution.\n r1 = self.get_reflectance(channel=\"1\")\n r2 = self.get_reflectance(channel=\"2\")\n r3N = self.get_reflectance(channel=\"3N\")\n\n # Read short-wave infrared (SWIR) channels at 30m resolution and match\n # VNIR resolution.\n r5 = self.get_reflectance(channel=\"5\")\n if self.datetime > datetime.datetime(2007, 5, 1) or not include_channel_r5:\n # The SWIR sensor suffered from temperature problems after May\n # 2007. Images later on are set to a dummy value \"1\", which won't\n # influence the following thresholding tests. 
Swath edge NaN pixels\n # stay NaN.\n r5[~np.isnan(r5)] = 1\n r5 = np.repeat(np.repeat(r5, 2, axis=0), 2, axis=1)\n\n # Read thermal (TIR) channel at 90m resolution and match VNIR\n # resolution.\n bt14 = self.get_brightnesstemperature(channel=\"14\")\n bt14 = np.repeat(np.repeat(bt14, 6, axis=0), 6, axis=1)\n\n # Ratios for clear-cloudy-tests.\n r3N2 = r3N / r2\n r12 = r1 / r2\n\n ### TEST 1-4 ###\n # Set cloud mask to default \"confidently clear\".\n clmask = np.ones(r1.shape, dtype=np.float) * 2\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\"ignore\", r\"invalid value encountered\")\n\n # Set \"probably clear\" pixels.\n clmask[\n multiple_logical(\n r3N > 0.03,\n r5 > 0.01,\n 0.7 < r3N2,\n r3N2 < 1.75,\n r12 < 1.45,\n func=np.logical_and,\n )\n ] = PROBABLY_CLEAR\n\n # Set \"probably cloudy\" pixels.\n clmask[\n multiple_logical(\n r3N > 0.03,\n r5 > 0.015,\n 0.75 < r3N2,\n r3N2 < 1.75,\n r12 < 1.35,\n func=np.logical_and,\n )\n ] = PROBABLY_CLOUDY\n\n # Set \"confidently cloudy\" pixels\n clmask[\n multiple_logical(\n r3N > 0.065,\n r5 > 0.02,\n 0.8 < r3N2,\n r3N2 < 1.75,\n r12 < 1.2,\n func=np.logical_and,\n )\n ] = CONFIDENTLY_CLOUDY\n\n # Combine swath edge pixels.\n clmask[\n multiple_logical(\n np.isnan(r1),\n np.isnan(r2),\n np.isnan(r3N),\n np.isnan(r5),\n func=np.logical_or,\n )\n ] = np.nan\n\n if include_thermal_test:\n ### TEST 5 ###\n # Uncertain warm ocean pixels, higher than the 5th percentile of\n # brightness temperature values from all \"confidently clear\"\n # labeled pixels, are overwritten with \"confidently clear\".\n\n # Check for available \"confidently clear\" pixels.\n nc = np.sum(clmask == 2) / np.sum(~np.isnan(clmask))\n if nc > 0.03:\n bt14_p05 = np.nanpercentile(bt14[clmask == 2], 5)\n else:\n # If less than 3% of pixels are \"confidently clear\", test 5\n # cannot be applied according to Werner et al., 2016. 
However,\n # a sensitivity study showed that combining \"probably clear\"\n # and \"confidently clear\" pixels in such cases leads to\n # plausible results and we derive a threshold correspondingly.\n bt14_p05 = np.nanpercentile(\n bt14[np.logical_or(clmask == 2, clmask == 3)], 5\n )\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\"ignore\", r\"invalid value encountered\")\n # Pixels with brightness temperature values above the 5th\n # percentile of clear ocean pixels are overwritten with\n # \"confidently clear\".\n clmask[np.logical_and(bt14 > bt14_p05, ~np.isnan(clmask))] = 2\n\n # Combine swath edge pixels.\n clmask[np.logical_or(np.isnan(clmask), np.isnan(bt14))] = np.nan\n\n if output_binary:\n clmask[np.logical_or(clmask == 2, clmask == 3)] = 0 # clear\n clmask[np.logical_or(clmask == 4, clmask == 5)] = 1 # cloudy\n\n return clmask", "def ncwrt_retrieval_obs_s1(retr_setup, outname=None):\n\n #-- set name of file to be generated\n act_outname = outname if outname!=None else 'obs_s1.nc'\n msg = \"Start writing configuration file ***{}***...\".format(act_outname)\n FileLogger.info(msg)\n\n #-- compression settings\n zlev = retr_setup.zlev\n use_zlib = retr_setup.use_zlib\n\n #-- retrieval settings\n s1_table = retr_setup.obs_dct['S1']\n timepts = s1_table.geom.date_utc\n npts = len(timepts)\n s1_satid = np.array(s1_table.sat_id_lst, dtype=str)\n s1_data = s1_table.data\n s1_dataunc = s1_table.dataunc\n nt,npol = s1_data.shape\n\n #-- temporal settings, create time-values (time-since)\n time_start, time_end = datelst_get_month_aligned_bounds(timepts)\n time_coverage_start = time_start.strftime('%Y-%m-%dT%H:%M:%S')\n time_coverage_end = time_end.strftime('%Y-%m-%dT%H:%M:%S')\n ref_time = dt.datetime(timepts[0].year,1,1) #January1st in year of first point in time\n time_unit = 'seconds since {}'.format(ref_time.strftime('%Y-%m-%dT%H:%M:%S'))\n time_values = nc4.date2num(timepts, time_unit)\n\n #-- ensure directory exists\n mkdirp_smart(os.path.dirname(act_outname))\n\n #-- open file pointer\n ncfp = nc4.Dataset(act_outname, 'w')\n #-- add dimensions\n d1 = ncfp.createDimension('npoints',npts)\n d2 = ncfp.createDimension('npol',npol)\n\n #-- time-value\n ncvar = ncfp.createVariable( 'time', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('standard_name','time')\n ncvar.setncattr('long_name','time')\n ncvar.setncattr('units', time_unit)\n ncvar[:] = time_values[:]\n\n unit_one = np.float64(1)\n # backscatter\n ncvar = ncfp.createVariable( 'backscatter', np.float64, ('npoints','npol'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'backscatter in VH and VV polarisation')\n comment = \"VH is associated to npol=0, VV to npol=1.\"\n comment += \" linear units are used (not [dB]).\"\n ncvar.setncattr('comment', comment)\n ncvar.setncattr('units',unit_one)\n ncvar.setncattr('missing_value', retr_setup.obs_fill_value)\n ncvar[:,:] = s1_data[:,:]\n\n # backscatter uncertainty\n ncvar = ncfp.createVariable( 'backscatter_unc', np.float64, ('npoints','npol'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'backscatter uncertainty in VH and VV polarisation')\n comment = \"uniform uncertainty of {} [dB] was applied on the observed backscatter\".format(\n retr_setup.s1_unc_db)\n ncvar.setncattr('comment', comment)\n ncvar.setncattr('units',unit_one)\n ncvar.setncattr('missing_value', retr_setup.obs_fill_value)\n ncvar[:,:] = s1_dataunc[:,:]\n\n # satellite identifier\n ncvar = ncfp.createVariable( 'satellite_id', str, 
('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'satellite identifer')\n ncvar[:] = s1_satid[:]\n\n #-- global attributes\n ncfp.setncattr('creator_name',\"The Inversion Lab, Hamburg, Germany\")\n ncfp.setncattr('creator_email', \"Michael.Vossbeck(at)Inversion-Lab.com\")\n ncfp.setncattr('netcdf_libversion',\"{}\".format(nc4.__netcdf4libversion__))\n ncfp.setncattr('date_created',\"{}\".format(dt.datetime.utcnow().isoformat()))\n ncfp.setncattr('time_coverage_start',time_coverage_start)\n ncfp.setncattr('time_coverage_end',time_coverage_end)\n\n #-- close file pointer\n ncfp.close()\n\n # logging\n msg = \"...writing ***{}*** DONE\".format(act_outname)\n FileLogger.info(msg)", "def getCSD (LFP_input_data=None,LFP_input_file=None,sampr=None,dt=None,spacing_um=None,minf=0.05,maxf=300,norm=True,vaknin=True,save_to_sim=True,getAllData=False): # timeRange=None,\n\n ############### DEFAULT -- CONDITION 1 : LFP DATA COMES FROM SIMULATION ###############\n\n if LFP_input_data is None and LFP_input_file is None: ### GET LFP DATA FROM SIMULATION\n try:\n from .. import sim \n except:\n print('No LFP input data, input file, or existing simulation. Cannot calculate CSD.')\n else:\n ## Check if LFP was recorded during the simulation \n print('getCSD() is using LFP data from existing simulation.')\n\n\n # time step used in simulation recording \n if dt is None:\n dt = sim.cfg.recordStep # units: ms \n print('dt = ' + str(dt) + ' (units: ms)')\n\n\n sim_data_categories = sim.allSimData.keys()\n \n # Get LFP data from sim and instantiate as a numpy array \n if 'LFP' in sim_data_categories:\n lfp_data = np.array(sim.allSimData['LFP'])\n print('lfp_data shape = ' + str(lfp_data.shape))\n elif 'LFP' not in sim_data_categories:\n print('!! WARNING: NO LFP DATA !! Need to re-run simulation with cfg.recordLFP enabled')\n\n\n # Sampling rate of data recording during the simulation \n if sampr is None:\n sampr = 1./(sim.cfg.recordStep/1000.0) # divide by 1000.0 to turn denominator from units of ms to s\n\n\n # Spacing between electrodes --> convert from micron to mm \n if spacing_um is None:\n print('NOTE: using sim.cfg.recordLFP to determine spacing_um !!')\n spacing_um = sim.cfg.recordLFP[1][1] - sim.cfg.recordLFP[0][1]\n\n\n\n\n ############### CONDITION 2 : ARBITRARY LFP DATA ############################\n ## NOTE: EXPAND CAPABILITY TO INCLUDE LIST OF MULTIPLE FILES \n \n ## LOAD SIM DATA FROM JSON FILE\n elif LFP_input_data is None and '.json' in LFP_input_file:\n data = {}\n with open(LFP_input_file) as file:\n data['json_input_data'] = json.load(file)\n\n ## FOR MULTIPLE FILES\n #for x in LFP_input_file:\n #with open(x) as file:\n #data[x] = json.load(file)\n\n\n ## EXTRACT LFP DATA \n for key in data.keys:\n lfp_data_list = data[key]['simData']['LFP'] # only works in the 1 input file scenario; expand capability for multiple files \n \n ## CAST LFP DATA AS NUMPY ARRAY \n lfp_data = np.array(lfp_data_list)\n\n ## GET CSD DATA AND RELEVANT PLOTTING PARAMS \n csd_data = {}\n for i in data.keys():\n csd_data[i] = {} # e.g. 
csd['json_input_data'] = {}\n\n if sampr is None:\n csd_data[i]['sampr'] = 1./((data[i]['simConfig']['recordStep'])/1000.0) # assumes that data[i]['simConfig']['recordStep'] is in units of ms\n sampr = csd_data[i]['sampr']\n else:\n csd_data[i]['sampr'] = sampr\n\n if spacing_um is None:\n csd_data[i]['spacing_um'] = data[i]['simConfig']['recordLFP'][1][1] - data[i]['simConfig']['recordLFP'][0][1]\n spacing_um = csd_data[i]['spacing_um']\n else:\n csd_data[i]['spacing_um'] = spacing_um\n\n if dt is None:\n csd_data[i]['dt'] = data[i]['simConfig']['recordStep']\n dt = csd_data[i]['dt']\n else:\n csd_data[i]['dt'] = dt\n\n\n\n ## FOR LIST OF LFP DATA WITHOUT ANY .JSON INPUT FILE \n elif len(LFP_input_data) > 0 and LFP_input_file is None: # elif LFP_input_file is None and ...\n lfp_data = np.array(LFP_input_data) # get lfp_data and cast as numpy array\n\n\n\n\n ##############################################################################\n # Now lfp_data exists for either existing (e.g. empirical) or simulated data \n ##############################################################################\n\n # Convert spacing from microns to mm \n spacing_mm = spacing_um/1000\n\n # Bandpass filter the LFP data with getbandpass() fx defined above\n datband = getbandpass(lfp_data,sampr,minf,maxf) \n\n # Take CSD along smaller dimension\n if datband.shape[0] > datband.shape[1]:\n ax = 1\n else:\n ax = 0\n\n # VAKNIN CORRECTION\n if vaknin: \n datband = Vaknin(datband)\n\n # NORM <-- ASKING SAM MORE ABOUT THIS\n if norm: \n removemean(datband,ax=ax)\n\n # now each column (or row) is an electrode -- take CSD along electrodes\n CSD_data = -np.diff(datband,n=2,axis=ax)/spacing_mm**2 ## CSD_data should be in mV/mm**2, assuming that LFP data is in mV. \n \n\n ########################################\n ########## noBandpass trial ############\n ########################################\n datband_noBandpass = lfp_data.T\n \n if datband_noBandpass.shape[0] > datband_noBandpass.shape[1]:\n ax = 1\n else:\n ax = 0\n \n if vaknin:\n datband_noBandpass = Vaknin(datband_noBandpass)\n \n if norm:\n removemean(datband_noBandpass,ax=ax)\n \n CSD_data_noBandpass = -np.diff(datband_noBandpass,n=2,axis=ax)/spacing_mm**2 # noBandpass trial \n ##########################################\n\n\n\n ################## SAVING DATA ##########################\n # Add CSD and other param values to sim.allSimData for access outside of this function or script \n if save_to_sim is True: ## FROM SIM \n try:\n from .. 
import sim \n sim.allSimData['CSD'] = {}\n #sim.allSimData['CSD']['timeRange'] = timeRange \n sim.allSimData['CSD']['sampr'] = sampr\n sim.allSimData['CSD']['spacing_um'] = spacing_um \n sim.allSimData['CSD']['CSD_data'] = CSD_data\n sim.allSimData['CSD']['CSD_data_noBandpass'] = CSD_data_noBandpass # noBandpass trial \n except:\n print('NOTE: No sim.allSimData construct available to store CSD data')\n\n\n\n # RETURN CSD AND OTHER RELEVANT PARAM VALUES, IF DESIRED \n if getAllData is True:\n return lfp_data, CSD_data, sampr, spacing_um, dt\n if getAllData is False:\n return CSD_data # returns CSD in units of mV/mm**2 (assuming lfps are in mV)", "def dsinfomaker(compath, backpath, mwb, tcfs, SR=\"SR\"):#yrs, ves,\r\n\tdsinfo = OrderedDict()\r\n\t# ==========\r\n\tdsinfo[\"GFED\"] = ({\"alias\":\"GFED4.1\",\"long_name\":\"FRI\", \"units\":\"yrs\"})\r\n\tdsinfo[\"MODIS\"] = ({\"alias\":\"MCD64A1\", \"long_name\":\"FRI\",\"units\":\"yrs\", \"version\":\"v006\"})\r\n\tdsinfo[\"esacci\"] = ({\"alias\":\"FireCCI5.1\", \"long_name\":\"FRI\",\"units\":\"yrs\"})\r\n\tdsinfo[\"COPERN_BA\"] = ({\"alias\":\"CGLS\", \"long_name\":\"FRI\",\"units\":\"yrs\"})\r\n\tdsinfo[\"HANSEN_AFmask\"] = ({\"alias\":\"Hansen GFC & MCD14ML\", \"long_name\":f'FRI$_{{{SR}}}$',\"units\":\"yrs\"})\r\n\tdsinfo[\"HANSEN\"] = ({\"alias\":\"Hansen GFC\", \"long_name\":\"DRI\",\"units\":\"yrs\"})\r\n\tdsinfo[\"Risk\"] = ({\"alias\":\"Forest Loss Risk\"})\r\n\t# dsinfo[\"FutureRisk\"] = ({\"alias\":\"Forest Loss Risk\"})\r\n\tdsinfo[\"SRfrac\"] = ({\"alias\":\"Stand Replacing Fire Percentage\", \"long_name\":f'FRI$_{{{\"SR\"}}}$ %'})\r\n\r\n\tfor dsnm in dsinfo:\r\n\t\tif dsnm.startswith(\"H\"):\r\n\t\t\t# +++++ make a path +++++\r\n\t\t\tppath = compath + \"/BurntArea/HANSEN/FRI/\"\r\n\t\t\tfname = \"%s%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, tcfs, mwb)\r\n\t\t\t# fname = \"%s%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, mwb)\r\n\t\telif dsnm == \"Risk\":\r\n\t\t\tppath = compath + \"/BurntArea/Risk/FRI/\"\r\n\t\t\tfname = \"%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, mwb)\r\n\t\t\tcf.pymkdir(ppath)\r\n\t\t# elif dsnm == \"FutureRisk\":\r\n\t\t# \tppath = compath + \"/BurntArea/Risk/FRI/\"\r\n\t\t# \tfname = f\"{dsnm}_annual_burns_MW_{mwb}degreeBox_{yrs}yrs_{ves}.nc\" \r\n\t\t# \tcf.pymkdir(ppath)\r\n\t\telse:\r\n\t\t\t# fname = \"Hansen_GFC-2018-v1.6_regrided_esacci_FRI_%ddegMW_SIBERIA\" % (mwb)\r\n\t\t\tppath = compath + \"/BurntArea/%s/FRI/\" % dsnm\r\n\t\t\tfname = \"%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, mwb)\r\n\t\t# +++++ open the datasets +++++\r\n\t\tdsinfo[dsnm][\"fname\"] = ppath+fname\r\n\r\n\r\n\treturn dsinfo", "def txt2hdf5_mudis(config, init_file=0, final_file=100, step=1, expo='100'):\n # --------SKYMAP--------------\n # Create the directory to save the results\n os.makedirs(os.path.dirname(cwd + '/config_files/'), exist_ok=True)\n\n alignment = add_align()\n\n # Extract skymap from alignment file\n skymap = np.zeros((len(alignment), 2))\n\n for i in np.arange(len(skymap)):\n skymap[i] = alignment['Azimuth'][i], alignment['Zenith'][i]\n\n # Save Skymap information\n with h5py.File(cwd + '/config_files/skymap_radiance.h5', 'w') as sky:\n\n if not list(sky.items()):\n sky.create_dataset('/skymap', data=skymap)\n else:\n del sky['skymap']\n\n sky.create_dataset('/skymap', data=skymap, dtype='f4')\n sky['skymap'].attrs['Columns'] = 'Azimuth, Zenith'\n sky['skymap'].dims[0].label = 'channel'\n sky['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n config['skymap'] = skymap\n\n # Save MUDIS file 
information\n\n # Import the radiance data from sensor\n files = sorted(\n glob.glob(config['raw_dir'] + '/radiance/{}/data/data_*.txt'.format(config['date'])))\n\n print('Total files in the directory: ' + str(len(files)) + ' files')\n\n ans = input('convert all files? (y/n): ')\n\n if ans == 'n':\n print('configure initial and final file index in the function options')\n else:\n init_file = 0\n final_file = len(files)\n\n for fil in np.arange(init_file, final_file):\n # Import the data from the file\n file = np.genfromtxt(files[fil], delimiter='', skip_header=11)\n\n # ------------RADIANCE DATA RAW---------------\n # create the radiance matrix\n data = np.zeros([113, 992])\n\n for i in np.arange(113):\n if str(alignment.iloc[i][3]) == 'nan':\n data[i] = np.nan\n else:\n try:\n data[i] = file[:, int(alignment.iloc[i][3] + config['channel_pixel_adj'])] #\n except:\n pass #\n # read the pixels index\n # in the alignment file and copy the\n # data in the radiance matrix']))\n\n # Correct time for the file UTC\n name = os.path.split(files[fil])\n\n # Read name of the file (correct time)\n time = name[1][6:25]\n # convert time to datetime format\n time = datetime.datetime.strptime(time, '%d.%m.%Y_%H_%M_%S')\n # print(time)\n new_name = datetime.datetime.strftime(time, '%Y%m%d_%H%M%S')\n\n with open(files[fil], 'r') as file:\n dat = file.readlines()\n\n # Extract information from .dat file\n exposure = int(dat[4][12:-1])\n NumAve = int(dat[7][17:-1])\n CCDTemp = int(dat[8][15:-1])\n NumSingMes = int(dat[10][27:-1])\n ElectrTemp = int(dat[9][23:-1])\n\n # Create the directory to save the results\n os.makedirs(os.path.dirname(config['str_dir'] + '/radiance/{}/data/').format(config['date']),\n exist_ok=True)\n\n if exposure == expo:\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(config['date'], new_name),\n 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data', data=data, dtype='f4')\n datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = exposure\n datos['data'].attrs['NumAver'] = NumAve\n datos['data'].attrs['CCDTemp'] = CCDTemp\n datos['data'].attrs['NumSingMes'] = NumSingMes\n datos['data'].attrs['ElectrTemp'] = ElectrTemp\n datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n datos['skymap'].attrs['Columns'] = 'Azimuth, Zenith'\n\n datos.close()\n\n print('File ' + str(fil + init_file + 1) + ' of ' +\n str((final_file - init_file)) + ' saved')\n else:\n print('Exposure are not same', expo, exposure)\n break\n\n print('Completed')", "def test_calc_ch4_working_cap():\n OUTPUT_PARAMETERS = Dict(\n dict={\n 'Density': 0.440527,\n 'Density_unit': 'g/cm^3',\n 'Estimated_saturation_loading': 41.985376,\n 'Estimated_saturation_loading_unit': 'mol/kg',\n 'Input_block': [1.865, 100],\n 'Input_ha': 'DEF',\n 'Input_structure_filename': 'tmp4a13iby3.cif',\n 'Input_volpo': [1.865, 1.865, 100000],\n 'Number_of_blocking_spheres': 0,\n 'POAV_A^3': 8623.69,\n 'POAV_A^3_unit': 'A^3',\n 'POAV_Volume_fraction': 
0.67999,\n 'POAV_Volume_fraction_unit': None,\n 'POAV_cm^3/g': 1.54358,\n 'POAV_cm^3/g_unit': 'cm^3/g',\n 'PONAV_A^3': 0.0,\n 'PONAV_A^3_unit': 'A^3',\n 'PONAV_Volume_fraction': 0.0,\n 'PONAV_Volume_fraction_unit': None,\n 'PONAV_cm^3/g': 0.0,\n 'PONAV_cm^3/g_unit': 'cm^3/g',\n 'Unitcell_volume': 12682.1,\n 'Unitcell_volume_unit': 'A^3',\n 'adsorption_energy_widom_average': -11.1626207486,\n 'adsorption_energy_widom_dev': 0.02083606,\n 'adsorption_energy_widom_unit': 'kJ/mol',\n 'conversion_factor_molec_uc_to_cm3stp_cm3': 2.9347915768,\n 'conversion_factor_molec_uc_to_mg_g': 4.7676018308,\n 'conversion_factor_molec_uc_to_mol_kg': 0.2972320343,\n 'henry_coefficient_average': 7.71003e-06,\n 'henry_coefficient_dev': 1.65115e-08,\n 'henry_coefficient_unit': 'mol/kg/Pa',\n 'is_kh_enough': True,\n 'is_porous': True,\n 'isotherm': {\n 'enthalpy_of_adsorption_average': [-13.510607783958, -10.787702310577],\n 'enthalpy_of_adsorption_dev': [0.76886345231266, 1.0196832123586],\n 'enthalpy_of_adsorption_unit': 'kJ/mol',\n 'loading_absolute_average': [3.6279844874624, 16.11968088498],\n 'loading_absolute_dev': [0.15865715470393, 0.075109385284932],\n 'loading_absolute_unit': 'mol/kg',\n 'pressure': [5.8, 65],\n 'pressure_unit': 'bar'\n },\n 'temperature': 298,\n 'temperature_unit': 'K'\n })\n\n results = engine.run(calc_ch4_working_cap, OUTPUT_PARAMETERS)\n results_dict = results.get_dict()\n\n assert results_dict['wc_65bar_mol/kg_average'] == pytest.approx(12.5, abs=0.1)", "def main():\n #diffuser_data_dir = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Spectrometer\\Diffuser_Irradiance\\saved_quads\\3845_ms\\saved_plots_modified'\n #diffuser_light_data = os.path.join(diffuser_data_dir,'Light_data')\n #diffuser_dark_data = os.path.join(diffuser_data_dir, 'Dark_data')\n #print(diffuser_data_dir)\n #cc\n #int_time_diffuser = 3845.0\n\n# radiance_data_dir_UV = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Spectrometer\\Radiance_Cal_UV_Lamp\\saved_quads\\saved_plots_modified'\n# radiance_light_data_UV = os.path.join(radiance_data_dir_UV,'DSS-Y')\n# radiance_dark_data_UV = os.path.join(radiance_data_dir_UV, 'Dark_data')\n## #int_time_radiance = 93.0\n## print(radiance_data_dir_UV)\n##\n radiance_data_dir_VIS = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Spectrometer\\Radiance_Cal_UV_Lamp\\saved_quads\\processed_h5'\n radiance_light_data_VISminusY = os.path.join(radiance_data_dir_VIS,'DSS-Y')\n radiance_light_data_VIS_Center = os.path.join(radiance_data_dir_VIS,'DSS_Center')\n radiance_light_data_VISplusY = os.path.join(radiance_data_dir_VIS,'DSS+Y') \n radiance_dark_data_VIS = os.path.join(radiance_data_dir_VIS, 'Dark_Data')\n\n\n\n # mean_diffuser_data = calculate_mean(diffuser_light_data)\n #mean_diffuser_dark_data = calculate_mean(diffuser_dark_data)\n#\n#\n# #Let's correct for dark current and work in signal rates unit\n# diffuser_dc_corrected = (mean_diffuser_data - mean_diffuser_dark_data)\n# diffuser_dc_corrected = np.round(diffuser_dc_corrected, 2)\n# diffuser_dc_corrected[diffuser_dc_corrected <0] = 0\n# mean_save_dir = os.path.join(diffuser_data_dir,'processed_average_data')\n# mean_save_irradiance = os.path.join(mean_save_dir, 'mean_irradiance_3845ms.csv')\n# np.savetxt(mean_save_irradiance, diffuser_dc_corrected, fmt='%1.3f', delimiter=\",\")\n \n\n\n\n# mean_radiance_data_UV = calculate_mean(radiance_light_data_UV)\n# mean_radiance_dark_data_UV = calculate_mean(radiance_dark_data_UV)\n# radiance_dc_corrected_UV = (mean_radiance_data_UV - mean_radiance_dark_data_UV)\n# radiance_dc_corrected_UV = 
np.round(radiance_dc_corrected_UV, 2)\n# radiance_dc_corrected_UV[radiance_dc_corrected_UV < 0] = 0\n###\n mean_radiance_dark_data_VIS = calculate_mean_dark(radiance_dark_data_VIS)\n \n \n # Correct for Dark current\n mean_radiance_data_VISminusY = calculate_mean(radiance_light_data_VISminusY) - mean_radiance_dark_data_VIS\n mean_radiance_data_VIS_Center = calculate_mean(radiance_light_data_VIS_Center)- mean_radiance_dark_data_VIS\n mean_radiance_data_VISplusY = calculate_mean(radiance_light_data_VISplusY) - mean_radiance_dark_data_VIS\n \n \n #radiance_dc_corrected_VIS[radiance_dc_corrected_VIS < 0] = 0\n#\n# \n\n# \n# mean_save_dir_UV = os.path.join(radiance_data_dir_UV,'processed_average_data')\n mean_save_dir_VIS = os.path.join(radiance_data_dir_VIS,'Mean_Processed_Data')\n if not os.path.exists(mean_save_dir_VIS):\n os.makedirs(mean_save_dir_VIS)\n #\n# mean_save_radiance_UV = os.path.join(mean_save_dir_UV, 'mean_radiance_DSSminus_UV.csv')\n# mean_save_radiance_VIS = os.path.join(mean_save_dir_VIS, 'mean_radiance_DSSminus_VIS.csv')\n# #\n# \n# np.savetxt(mean_save_radiance_UV, radiance_dc_corrected_UV, fmt='%1.3f', delimiter=\",\")\n# np.savetxt(mean_save_radiance_VIS, radiance_dc_corrected_VIS, fmt='%1.3f', delimiter=\",\")\n print('DONE')\n\n #Write into h5file\n hf_name = os.path.join(mean_save_dir_VIS,'Mean_Data.h5')\n hf = h5py.File(hf_name,'w')\n hf.create_dataset('DSS-Y', data= mean_radiance_data_VISminusY)\n hf.create_dataset('DSS_Center', data=mean_radiance_data_VIS_Center)\n hf.create_dataset('DSS+Y', data= mean_radiance_data_VISplusY)", "def test_run(self):\n files = [\n (\"AS1-1.phy_r8s.txt\", \"AS1-1.phy_r8s.txt_2.5.txt\"),\n (\"AS1-3.phy_r8s.txt\", \"AS1-3.phy_r8s.txt_2.5.txt\"),\n (\"AS1-4.phy_r8s.txt\", \"AS1-4.phy_r8s.txt_2.5.txt\"),\n ]\n for file_pair in files:\n input_file = file_pair[0]\n expected_file = file_pair[1]\n infile = self.test_data_path + input_file\n outfile = self.test_data_path + expected_file\n divnum = 2.5\n result = run(infile, divnum)\n\n with open(outfile) as handle:\n expected_result = handle.read()\n self.assertEqual(expected_result, result)", "def get_data(tstart, tstop, year, grad_list, out_dir):\n print(\"Period: \" + str(tstart) + '<-->' + str(tstop) + ' in Year: ' + str(year))\n#\n#--- extract ecach group data\n#\n for group in grad_list:\n print(group)\n\n line = 'operation=retrieve\\n'\n line = line + 'dataset = mta\\n'\n line = line + 'detector = grad\\n'\n line = line + 'level = 0.5\\n'\n line = line + 'filetype = ' + group + '\\n'\n line = line + 'tstart = ' + str(tstart) + '\\n'\n line = line + 'tstop = ' + str(tstop) + '\\n'\n line = line + 'go\\n'\n\n data_list = mcf.run_arc5gl_process(line)\n#\n#--- read the first fits file and prep for the data list\n#\n [cols, tbdata] = ecf.read_fits_file(data_list[0])\n col_list = []\n for ent in cols:\n if ent.lower() == 'time':\n continue\n mc = re.search('st_', ent.lower())\n if mc is not None:\n continue\n\n col_list.append(ent)\n\n mcf.rm_files(data_list[0])\n tdata = tbdata['time']\n mdata = []\n for col in col_list:\n mdata.append(tbdata[col])\n#\n#--- read the rest of the data\n#\n clen = len(col_list)\n for k in range(1, len(data_list)):\n fits = data_list[k]\n [cols, tbdata] = ecf.read_fits_file(fits)\n tdata = numpy.append(tdata, tbdata['time'])\n\n for m in range(0, clen):\n cdata = tbdata[col_list[m]]\n mdata[m] = numpy.append(mdata[m], cdata)\n\n mcf.rm_files(fits)\n\n dout = out_dir + group.capitalize() + '/'\n\n if not os.path.isdir(dout):\n cmd = 'mkdir ' + dout\n 
os.system(cmd)\n#\n#--- write out the data to fits file\n#\n for k in range(0, clen):\n col = col_list[k]\n ocols = ['time', col.lower()]\n cdata = [tdata, mdata[k]]\n\n ofits = dout + col.lower()+ '_full_data_' + str(year) +'.fits'\n\n if os.path.isfile(ofits):\n ecf.update_fits_file(ofits, ocols, cdata)\n else:\n ecf.create_fits_file(ofits, ocols, cdata)\n\n#\n#--- zip the fits file from the last year at the beginning of the year\n#\n ecf.check_zip_possible(dout)", "def test_conus():\n sat = gini.GINIZFile(get_test_file(\"TIGN02\", fponly=True))\n assert sat.archive_filename() == \"GOES_SUPER_IR_201509281745.png\"\n assert sat.awips_grid() == 0\n assert sat.metadata[\"map_projection\"] == 5", "def test_fetch_station_data_wdc_format_hour_data_from_wdc(tmpdir): # pylint: disable=invalid-name\n cadence = 'hour'\n station = 'NGK'\n start_date = date(2015, 4, 1)\n end_date = date(2015, 4, 30)\n service = 'WDC'\n expected_filename = 'ngk2015.wdc'\n configpath = os.path.join(DATAPATH, 'wdc_minute_data_wdcoutput.ini')\n # TODO: cf fetch_station_data vs oracle files\n # oraclefile = os.path.join(ORACLEPATH, 'ngk2015.wdc')\n\n # ensure we have somewhere to put the data\n manualdir = os.path.join(str(tmpdir), 'manual')\n funcdir = os.path.join(os.path.dirname(manualdir),\n 'via__fetch_station_data')\n for dir_ in (manualdir, funcdir):\n os.mkdir(dir_)\n manualfile = os.path.join(manualdir, expected_filename)\n funcfile = os.path.join(funcdir, expected_filename)\n\n # 'manual' way\n config = cws.ParsedConfigFile(configpath, service)\n form_data = cws.FormData(config)\n form_data.set_datasets(start_date, end_date, station, cadence, service)\n req_wdc = cws.DataRequest()\n req_wdc.read_attributes(config)\n req_wdc.set_form_data(form_data.as_dict())\n resp_wdc = rq.post(\n req_wdc.url, data=req_wdc.form_data, headers=req_wdc.headers\n )\n\n cws.check_response(resp_wdc.status_code, resp_wdc.content)\n\n with zipfile.ZipFile(BytesIO(resp_wdc.content)) as fzip:\n fzip.extractall(manualdir)\n\n assert not os.path.isfile(funcfile)\n # with wrapper function\n cws.fetch_station_data(\n start_date=start_date, end_date=end_date,\n station=station, cadence=cadence,\n service=service, saveroot=funcdir, configpath=configpath\n )\n assert os.path.isfile(funcfile)\n assert_all_lines_same(funcfile, manualfile)" ]
[ "0.6232863", "0.6190931", "0.6065787", "0.5965935", "0.59624934", "0.5858929", "0.5856982", "0.5837951", "0.5831565", "0.58128524", "0.5789938", "0.57884246", "0.5777758", "0.5774943", "0.57337826", "0.5721014", "0.569534", "0.5672289", "0.56514597", "0.5634904", "0.5580489", "0.55664647", "0.55572945", "0.5547208", "0.55148506", "0.55077463", "0.5495898", "0.5479068", "0.5474958", "0.5472551", "0.5470454", "0.5461943", "0.54608774", "0.5442911", "0.54394644", "0.54363304", "0.54341155", "0.5418639", "0.5418111", "0.5405701", "0.5403194", "0.5401926", "0.54009503", "0.5383155", "0.53806305", "0.53765905", "0.53734565", "0.5370778", "0.5370048", "0.5363304", "0.535799", "0.5355772", "0.5353627", "0.5347003", "0.53448147", "0.53417337", "0.53401864", "0.5337057", "0.53322285", "0.53289014", "0.53220975", "0.5317635", "0.5310569", "0.53078103", "0.5307153", "0.5301299", "0.5297263", "0.52959245", "0.5291642", "0.5287759", "0.5287055", "0.5282357", "0.5279186", "0.52728444", "0.5271845", "0.5265456", "0.5261367", "0.52588433", "0.5247685", "0.52444565", "0.5241643", "0.5239588", "0.52374953", "0.5237098", "0.5229478", "0.522735", "0.5225439", "0.52252835", "0.5219018", "0.52186453", "0.52182627", "0.5214632", "0.52143496", "0.52094245", "0.5206825", "0.5204791", "0.52047503", "0.52006483", "0.5199394", "0.51990664" ]
0.7407615
0
Perform a 'run' action on a module. Module should have status LOADED for a System to actually call this method.
def run(self, **kwargs) -> None: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def module_runner(module):\n task_queue.put(1)\n result = sys.modules[module].run()\n task_queue.get()\n store_module_result(result) # Store the result in our repo", "def do_run(self, line: str):\n if self._real_module is None:\n print(\"'run' command depends on using a module. See 'use' for help.\")\n return\n\n self._real_module.run()", "def run_module(self, path):\n\n module = self.import_module(path)\n result = None\n\n if module:\n try:\n result = module.run()\n except AttributeError:\n self.error('Error Running Module: Missing run() method.')\n except Exception:\n e = sys.exc_info()[1]\n traceback = sys.exc_info()[2]\n self.warning('Exeption caught in module: {0} line: {1}'.format(\n e,\n traceback.tb_lineno))\n self.calls.append({path: result})\n state.save_hook_call(path, result)\n return result", "def RunModule(self):\n module = self.statModules.GetActive()\n \n self.ShutdownStatModule()\n \n if module is not None:\n wx.LogMessage(\"Running stat module...\")\n self.statPage = module.Load(self.nb, self)\n self.SetView(self.statModules.caption, self.statPage)", "def do_run(self, args):\n logger.debug(\"do_run() was called\")\n\t\n parser = CrispyArgumentParser(description=self.do_run.__doc__, prog=\"run\")\n parser.add_argument(\"module\", metavar=\"<module>\", help=\"module name\")\n parser.add_argument(\"session_id\", metavar=\"<session id>\", help=\"session to run on\")\n parser.add_argument(\"arguments\", nargs=argparse.REMAINDER, metavar=\"<arguments>\", help=\"module arguments\")\n \n try:\n pargs = parser.parse_args(shlex.split(args))\n except MyParserException as e:\n print e\n return\n\n try:\n target = self.srv.get_client(int(pargs.session_id))\n except Exception as e:\n fprint.error(\"Session id should be an integer.\")\n return\n\n if not target:\n fprint.error(\"Improper session id.\")\n return\n\n try:\n mod = self.srv.get_module(pargs.module)(target)\n except Exception as me:\n fprint.error(\"Error loading \\\"{}\\\" module: {}\".format(pargs.module, me))\n return\n\n try:\n margs = mod.check_args(pargs.arguments)\n except MyParserException as e:\n print e\n return\n\n try:\n target.run_module(mod, margs)\n except Exception as e:\n fprint.error(\"Error running module: {}\".format(e))\n return", "def execute_module(self):\n raise NotImplementedError", "def exec_module(self, module):\n pass", "def run(self):\n\n self._action.execute()", "def run_module(self, module_name, args=[], kwargs={}):\n if not module_loader.has_plugin(module_name):\n raise UnsupportedAnsibleModule(\"Unsupported ansible module \\\"{}\\\"\".format(module_name))\n self.module_name = module_name\n\n previous_frame = inspect.currentframe().f_back\n caller_info = inspect.getframeinfo(previous_frame)\n kwargs.update({\"caller_info\": caller_info})\n\n return self._run_ansible_module(*args, **kwargs)", "def run_case(self, **kwargs):\n module_name = kwargs.get('module_name', None)\n if self.result:\n self.success_msg.append('>>>%s PASSED' % module_name or sys.modules[__name__])\n else:\n self.fail_msg.insert(0, '>>>%s FAILED' % module_name or sys.modules[__name__])", "def run(self, task):\n\n self._setup()\n\n runnable = load_from_module(task.task)\n runnable(*task.get_args(), **task.get_kwargs())", "def do_workload(self):\n module_manager = self._core.get_module_manager()\n module = module_manager.get_module_by_name(self._values[\"name\"])\n module_manager.update_module(module)", "def run_module(self):\n try:\n self.output_dict['wind_delay'] = self.calculate_wind_delay()\n return 0 # module ran 
successfully\n except:\n return 1 # module did not run successfully", "def _run_ansible_module(self, *args, **kwargs):\n caller_info = kwargs.pop(\"caller_info\", None)\n if not caller_info:\n previous_frame = inspect.currentframe().f_back\n caller_info = inspect.getframeinfo(previous_frame)\n\n module_args = copy.deepcopy(args)\n module_kwargs = copy.deepcopy(kwargs)\n\n verbosity = module_kwargs.pop(\"verbosity\", None)\n if not verbosity:\n verbosity = self.options.get(\"verbosity\", 2)\n module_ignore_errors = module_kwargs.pop(\"module_ignore_errors\", False)\n module_attrs = module_kwargs.pop(\"module_attrs\", {})\n\n module_info = {\n \"module_name\": self.module_name,\n \"args\": module_args,\n \"kwargs\": module_kwargs,\n \"module_attrs\": module_attrs\n }\n self._log_modules(caller_info, module_info, verbosity)\n\n task = self.build_task(**module_info)\n results = self.run_tasks(self.host_pattern, self.loader, self.im, self.vm, self.options, tasks=[task])\n\n self._log_results(caller_info, module_info, results, verbosity)\n self._check_results(caller_info, module_info, results, module_ignore_errors, verbosity)\n\n if isinstance(self, AnsibleHost):\n results = results[self.hostnames[0]]\n\n return results", "def run(self):\r\n self.inst.write(':RUN')", "def RunModule(self):\n\n module = self.viewModule.GetActive()\n self.ShutdownViewModule()\n overviewText = \"\"\n\n # o The RunTest() for all samples must now return a window that can\n # be placed in a tab in the main notebook.\n # o If an error occurs (or has occurred before) an error tab is created.\n\n if module is not None:\n wx.LogMessage(\"Loading view module...\")\n if hasattr(module, \"overview\"):\n overviewText = module.overview\n\n try:\n if hasattr(module, \"GetWindow\"):\n self.viewPage = module.GetWindow(self, self.nb, self)\n elif hasattr(module, \"runTest\"): # Obsoleted\n self.viewPage = module.runTest(self, self.nb, self)\n except:\n self.viewPage = ViewModuleErrorPanel(self.nb, self.codePage,\n ViewModuleError(sys.exc_info()), self)\n\n wx.LogMessage(\"Loaded %s for module %s\" % (self.viewPage, module))\n\n bg = self.nb.GetThemeBackgroundColour()\n if bg:\n self.viewPage.SetBackgroundColour(bg)\n\n assert self.viewPage is not None, \"runTest must return a window!\"\n\n else:\n # There was a previous error in compiling or exec-ing\n self.viewPage = ViewModuleErrorPanel(self.nb, self.codePage,\n self.viewModule.GetErrorInfo(), self)\n\n self.SetOverview(self.viewModule.name + \" Help\", overviewText)\n\n if self.firstTime:\n # change to the view page the first time a module is run\n self.UpdateNotebook(2)\n self.firstTime = False\n else:\n # otherwise just stay on the same tab in case the user has changed to another one\n self.UpdateNotebook()", "def run_run(self, cmds):\n pass", "def tfrun(args, build_modules, build_workspace, build_env):\n\n # loop through each selected module(s) and apply the action as specified by user\n for m in build_modules:\n print(\"\\n\\n****************************************************************************\")\n print(\"Permforming action \\\"{0}\\\" for module {1}\".format(args.action, m))\n print(\"****************************************************************************\\n\\n\")\n run_module(args, m, build_workspace, build_env)", "def action_run(self):\n pass", "def main():\n obj = UnityFilesystem()\n obj.perform_module_operation()", "def modExec(module):\n modName = module.split('_')[-1]\n if \"live\" in module:\n dn = '{0} (live)'.format(modName.upper())\n else:\n dn = 
'{0}'.format(modName.upper())\n\n try:\n modStart = datetime.utcnow()\n log.info(\"Running {0}\".format(dn))\n modImport = 'modules.' + module\n\n import_module(modImport)\n\n modOutput = [i for i in glob.glob(outputdir + '/*') if all(p in i for p in [modName, runID])]\n try:\n arch = [archive.add_file(os.path.basename(outfile)) for outfile in modOutput]\n except IndexError:\n pass\n\n modEnd = datetime.utcnow()\n modRuntime = modEnd - modStart\n log.debug(\"{0} finished in {1}.\".format(dn, modRuntime))\n\n except KeyboardInterrupt:\n sys.stdout.write('\\r')\n sys.stdout.flush()\n log.error(\"{0} was killed. \".format(module))\n\n except Exception:\n log.error(\"{0} failed: {1}\".format(module, [traceback.format_exc()]))", "def run(self):\n self._setupLogger()\n self.setup()\n\n self.logger.info(self.moduleName + \" starting run loop.\")\n\n while True:\n self.loop()", "def Run():\r\n pass", "def run_script(self):\n pass", "def run(self):\n try:\n self._run()\n except Exception as err:\n # TODO: Do Task Failure to run exception handling\n pass", "def execute_module(self, \n project_id: str, \n branch_id: str, \n module: ModuleHandle, \n artifacts: Dict[str, ArtifactDescriptor]\n ) -> None:\n task = ExtendedTaskHandle(\n project_id=project_id,\n branch_id=branch_id,\n module_id=module.identifier,\n controller=self\n )\n self.tasks[task.task_id] = task\n # print(\"Starting execution of {} with artifacts: [{}]\".format(module.command.command_id, artifacts))\n self.backend.execute_async(\n task=task,\n command=module.command,\n artifacts=artifacts,\n resources=module.provenance.resources\n )", "def run(self):\n# log.trace(\" run task %s \", self.name)\n return self.target.send(self.params)", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def run(self):\n self.run()", "def main():\n module = IRODSPermissionModule()\n module.run()", "def run(self, script, *args, **kwargs):\n return self._run('run', script, *args, **kwargs)", "def setup_module(module):\n print(\"Start rishabhSetupModule of Program\")", "def run(self, host_list, module_name, module_args):\n # create play with tasks\n play_source = dict(\n name=\"Ansible Play\",\n hosts=host_list,\n gather_facts='no',\n tasks=[dict(action=dict(module=module_name, args=module_args))]\n )\n play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)\n\n # actually run it\n tqm = None\n self.callback = ResultsCollector()\n try:\n tqm = TaskQueueManager(\n inventory=self.inventory,\n variable_manager=self.variable_manager,\n loader=self.loader,\n options=self.options,\n passwords=self.passwords,\n )\n tqm._stdout_callback = self.callback\n tqm.run(play)\n finally:\n if tqm is not None:\n tqm.cleanup()", "def do_load(self, name):\n try:\n self.runner.run()\n\n except():\n print('Loading failed')", "def run_step(self):\n self.control_instance.run_step()", "def run(self):\n # check param and env\n self.sanity_check()\n\n # only-check mode\n if self.module.check_mode:\n self.module.exit_json(**self.result)\n\n self.init_session()\n\n action = self.select_action()\n action()", "def run(self):\n self.__power_on()\n\n self.__main()", "def run(self, host_list, module_name, module_args):\n # create play with tasks\n play_source = dict(\n name=\"Ansible Play\",\n hosts=host_list,\n gather_facts='no',\n tasks=[dict(action=dict(module=module_name, args=module_args))]\n )\n play = Play().load(play_source, 
variable_manager=self.variable_manager, loader=self.loader)\n\n # actually run it\n tqm = None\n self.callback = AnsibleTaskResultCallback()\n try:\n tqm = TaskQueueManager(\n inventory=self.inventory,\n variable_manager=self.variable_manager,\n loader=self.loader,\n options=self.options,\n passwords=self.passwords,\n )\n tqm._stdout_callback = self.callback\n tqm.run(play)\n finally:\n if tqm is not None:\n tqm.cleanup()", "def run_module_with_output(self, mod, payload=None, run_as_job=False, timeout=301):\n options_str = 'use {}/{}\\n'.format(mod.moduletype, mod.modulename)\n if self.rpc.consoles.console(self.cid).is_busy():\n raise MsfError('Console {} is busy'.format(self.cid))\n self.rpc.consoles.console(self.cid).read() # clear data buffer\n opts = mod.runoptions.copy()\n if payload is None:\n opts['DisablePayloadHandler'] = True\n\n # Set module params\n for k in opts.keys():\n options_str += 'set {} {}\\n'.format(k, opts[k])\n\n # Set payload params\n if mod.moduletype == 'exploit':\n opts['TARGET'] = mod.target\n options_str += 'set TARGET {}\\n'.format(mod.target)\n\n if 'DisablePayloadHandler' in opts and opts['DisablePayloadHandler']:\n pass\n elif isinstance(payload, PayloadModule):\n if payload.modulename not in mod.payloads:\n raise ValueError(\n 'Invalid payload ({}) for given target ({}).'.format(payload.modulename, mod.target))\n options_str += 'set payload {}\\n'.format(payload.modulename)\n for k, v in payload.runoptions.items():\n if v is None or (isinstance(v, str) and not v):\n continue\n options_str += 'set {} {}\\n'.format(k, v)\n else:\n raise ValueError('No valid PayloadModule provided for exploit execution.')\n\n # Run the module without directly opening a command line\n options_str += 'run -z'\n if run_as_job:\n options_str += \" -j\"\n self.rpc.consoles.console(self.cid).write(options_str)\n data = ''\n timer = 0\n while data == '' or self.rpc.consoles.console(self.cid).is_busy():\n time.sleep(1)\n data += self.rpc.consoles.console(self.cid).read()['data']\n timer += 1\n if timer > timeout:\n break\n return data", "def __run(self):\n sys.settrace(self.globaltrace)\n self.__run_backup()\n self.run = self.__run_backup", "def run(self):\n raise NotImplementedError('Run method not implemented in %s' % type(self).__name__)", "def _doRun(self, model: Model):\n raise Exception(\"Not implemented\")", "def run(self, tmp=None, task_vars=None):\n if not task_vars:\n task_vars = {}\n\n result = ActionBase.run(self, tmp=tmp, task_vars=task_vars)\n result.update(\n self._execute_module(\n module_name=self.module_name,\n module_args=self._task.args,\n task_vars=task_vars,\n wrap_async=self._task.async_val\n )\n )\n\n return result", "def run(self):\n run_simple(self.hostname, self.port, self.dispatch,\n use_reloader=self.debug)", "def run(_):\n pass", "def run(self, *args, **kwargs):\n raise NotImplementedError('Tasks must define the run method.')", "def execute(self):\n # Do the task that the module is suppose to do.\n\n # Return a message telling the user that the task is done.\n return \"\\nTemplate module did ...\"", "def local_run(self, parameters=None) -> \"Run\":\n # NOTE -is there a use-case for this?\n raise NotImplementedError()", "def do_workload(self):\n module_manager = self._core.get_module_manager()\n module = module_manager.get_module_by_name(self._values[\"name\"])\n module_manager.uninstall_module(module)", "def run (self):\n t = threading.Thread(target=self.runController)\n t.start()", "def do_workload(self):\n module_manager = 
self._core.get_module_manager()\n module_manager.install_module(self.get_meta())", "def run_cmd(self):\r\n self.run = True", "def run_suite(self, name=\"_\", result=None):\n result = result if result else self.result\n self.mod_suites[name].run(result)", "def run(self):\n return self.private_run()", "def run_script(extension_invocation_info):\n acm.RunModuleWithParameters(__name__, acm.GetDefaultContext())", "async def run_controller(self, request):\n self.run()\n return {\"status\": \"ok\"}", "def run_game_logic(self):\n pass", "def run(self):\r\n self.log(texto=f\"Executando {self._name}\")", "def execute_module(self, module, *args, **opts):\n module_file = module.__file__\n if module_file.endswith('.pyc'):\n module_file = module_file[:-1]\n cmd = [self._path]\n if 'python_options' in opts:\n cmd.extend(opts['python_options'])\n del opts['python_options']\n cmd.append(module_file)\n cmd.extend(args)\n return get_cmd_output(*cmd, **opts)", "def run_module():\n parser = ap.ArgumentParser()\n parser.add_argument(\"--dry_run\", action=\"store_true\", help=\"When provided, return zero exit\"\n \" status irrespective of the number of failures\")\n args = parser.parse_args()\n params = read_params()\n assert \"validation\" in params\n dry_run_param = params[\"validation\"][\"common\"].get(\"dry_run\", False)\n params[\"validation\"][\"common\"][\"dry_run\"] = args.dry_run or dry_run_param\n validator = Validator(params)\n validator.validate().print_and_exit(\n get_structured_logger(__name__,\n params[\"common\"].get(\"log_filename\", None)),\n not (args.dry_run or dry_run_param))", "def run_script(self, params, config_no):\n raise NotImplementedError()", "def run(self, run):\n\n self._run = run", "def run_module_ground_plan(args):\n raise NotImplementedError", "def run(self, action_name=None, reuse=False, parallel=False):\n os.environ['WORKSPACE'] = self.workspace\n\n self.download_actions()\n self.instantiate_runners()\n\n if action_name:\n self.wf['action'][action_name]['runner'].run(reuse)\n else:\n for s in self.get_stages():\n self.run_stage(s, reuse, parallel)", "def _run(self):\n raise NotImplemented(\"Abstract method '_run' need to be defined\")", "def run():\n main()", "def run(self) -> None:\n soc = self.make_soc()\n soc_builder = self.build_soc(soc)\n if self.args.load:\n self.load(soc, soc_builder)", "def __run(self):\n sys.settrace(self.globaltrace) # set self.globaltrace before thread start\n self.__run_backup()\n self.run = self.__run_backup", "def Run(self):\n pass", "def _run(self):\n\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)", "def run(self):\n self.tcex.log.trace('run')", "def run(self):\n\n input_args = {}\n self._execute(input_args, self.args)", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def __run(self):\n try:\n sys.settrace(self.globaltrace)\n self.__run_backup()\n self.run = self.__run_backup\n except Exception, e:\n e.message = e.__class__.__name__ + ' in ' + self.getName() + ': ' + e.message\n self.__exception = e", "def RUN(self):", "def call(self, **params):\n # NOTE - use __call__??\n # TODO - move exec_script here?\n # TODO - call should handle param defaults\n from datapane.runner.exec_script import run\n\n run(self, params)", "def exec_module(self, module):\n\n if not self.filename.endswith(config.FILE_EXT) and not self.filename.endswith(\n \"__init__.py\"\n ):\n print(\"Fatal error: ExtensionLoader is asked to load a normal file.\")\n print(\"filename:\", 
self.filename)\n print(\"Expected extension:\", config.FILE_EXT)\n raise SystemExit\n\n name = module.__name__\n if module.__name__ == config.MAIN_MODULE_NAME:\n module.__name__ = \"__main__\"\n config.MAIN_MODULE_NAME = None\n\n with open(self.filename) as f:\n source = f.read()\n\n transforms.identify_requested_transformers(source)\n\n if config.TRANSFORMERS:\n original = source\n source = transforms.add_all_imports(source)\n source = transforms.apply_source_transformations(source)\n\n if config.DIFF and original != source:\n self.write_html_diff(name, original, source)\n\n if config.CONVERT and self.filename.endswith(config.FILE_EXT):\n print(\"############### Original source: ############\\n\")\n print(original)\n print(\"\\n############### Converted source: ############\\n\")\n print(source)\n print(\"=\" * 50, \"\\n\")\n\n source = transforms.apply_ast_transformations(source)\n exec(source, vars(module))", "def test_load_simple_module():\n loader = Loader()\n main_fname = loader.load(\"https://gist.githubusercontent.com/miohtama/80391980c2e73b285cfe/raw/dd89a55497ba33a6014453d9bb7432ab424c01cf/kivyhello.py#main\")\n mod = path_to_mod_name(main_fname)\n result = loader.run(mod, \"hello\")\n assert result == \"Hello there\"\n loader.close()", "def run():\n\n args = parse_arguments()\n app = rummage_app.RummageApp(args)\n app.MainLoop()\n\n return 0", "def run(self, *args, **kwargs):\n self.actions()\n\n for funcName, action in self._actions.items():\n actionName, actionParams = action\n if actionParams == None:\n func = getattr(self, funcName)\n print('Running %s.%s' % (self._title, funcName))\n func()\n else:\n self.runAction(actionName, funcName)\n self._db.commit_db()", "def runner(self):\n\n if self.__do_initialize:\n self.do_initialize()\n self.__do_initialize = False\n\n elif self.__do_start_running:\n self.do_start_running()\n self.__do_start_running = False\n\n elif self.__do_stop_running:\n #self.do_command(\"Stop\")\n self.do_stop_running()\n self.__do_stop_running = False\n\n elif self.__do_terminate:\n self.do_terminate()\n self.__do_terminate = False\n\n elif self.__do_pause_running:\n self.do_command(\"Pause\")\n self.__do_pause_running = False\n\n elif self.__do_resume_running:\n self.do_command(\"Resume\")\n self.__do_resume_running = False\n\n elif self.__do_recover:\n self.do_recover()\n self.__do_recover = False\n\n elif self.state(self.name) != \"stopped\":\n self.check_proc_heartbeats()\n self.check_proc_exceptions()\n\n if self.state(self.name) == \"running\":\n self.display_lbne_artdaq_output()", "def _run(self):\n raise NotImplementedError", "def runModtran(self, run):\n # Call for the MODTRAN card class\n modcard = ModtranCards()\n modcard.setDefaults()\n # Write the cards to the disk\n modcard.writeModtranCards(self.modtran_visits[run], self.outfilename)\n modcard.runModtran()", "def run_target(self):\n\n self.logger.info('run_target')\n\n # Clear all fields in GUI\n self.clear_gui_sig.emit()\n\n # Stop a previous target if it had a process running\n self.debugger.stop_target()\n\n if self.mode == self.AppMode.DYN_LINK:\n self.run_target_dynlink()\n elif self.mode == self.AppMode.DYN_LOAD:\n self.run_target_dynload()", "def run_command(self):\n global global_cycles_completed\n global_cycles_completed = 0\n\n cycles = self.on_spin(wx.SpinCtrl)\n\n # check that this function has been called on pressing run button\n text = \"\".join(\n [_(u\"run_command function has been called, number of cycles is: \"), str(cycles)])\n if self.state == 0:\n 
self.canvas_2d.render(text, True)\n else:\n self.canvas_3d.render()\n\n if cycles is not None: # if the number of cycles provided is valid\n self.monitors.reset_monitors()\n print(\"\".join([_(u\"Running for \"), str(cycles), _(u\" cycles\")]))\n self.devices.cold_startup()\n if self.run_network(cycles):\n global_cycles_completed += cycles", "def run(run_type, module, config):\n print(\" -----------------------------------------------------------------\")\n print(\" Beginning \" + run_type.lower() + \" test suite \")\n print(\" -----------------------------------------------------------------\")\n print(\"\")\n summary = run_quiet(run_type, module, config)\n print(\" -----------------------------------------------------------------\")\n print(\" \" + run_type.capitalize() + \" test suite complete \")\n print(\" -----------------------------------------------------------------\")\n print(\"\")\n return summary", "def run_component(self):\n raise NotImplementedError", "def _run ( self ) :\n raise NotImplementedError ( \"AppBase: method _run() should be implemented in a subclass\" )", "def run(self):\n if len(self.modules) == 0:\n raise ValueError(\"no modules registered\")\n while True:\n if not self._has_files() and not self._has_timeouts():\n break\n timeout = self._calculate_timeout()\n ready, _, _ = select(self._files(), [], [], timeout)\n self._dispatch_timeouts()\n for file in ready:\n self._trigger_file(file)", "def main():\n module = AnsibleModule(argument_spec=L3_interfaceArgs.argument_spec,\n supports_check_mode=True)\n\n result = L3_interface(module).execute_module()\n module.exit_json(**result)", "def onReloadAndTest(self, moduleName=\"NeedleFinder\"):\r\n print \"onReloadAndTest\"; msgbox(whoami())\r\n try:\r\n self.onReload()\r\n evalString = 'globals()[\"%s\"].%sTest()' % (moduleName, moduleName)\r\n tester = eval(evalString)\r\n tester.runTest()\r\n except Exception, e:\r\n import traceback\r\n traceback.print_exc()\r\n qt.QMessageBox.warning(slicer.util.mainWindow(),\r\n \"Reload and Test\", 'Exception!\\n\\n' + str(e) + \"\\n\\nSee Python Console for Stack Trace\")", "def run(self, threaded=False):\n if threaded:\n Thread(target=self._run).start()\n else:\n self._run()", "def run(self) -> None:\n self._hass.turn_on('scene.{0}'.format(self._args['scene']))", "def run():\n import hmmmAssembler ; reload(hmmmAssembler) # import helpers\n hmmmAssembler.main(Random) # this runs the code!", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass" ]
[ "0.749783", "0.74799657", "0.70269644", "0.69892204", "0.68432194", "0.67123836", "0.6491966", "0.64515406", "0.6416656", "0.64033836", "0.63711715", "0.6279245", "0.6268681", "0.6221671", "0.6220211", "0.62159735", "0.6164637", "0.61534256", "0.6147704", "0.61080045", "0.608897", "0.6048644", "0.6047645", "0.6001457", "0.59787935", "0.59534276", "0.59491783", "0.5943702", "0.5943702", "0.59371805", "0.5932379", "0.5920788", "0.59065926", "0.5866129", "0.5855437", "0.58542293", "0.5836355", "0.581457", "0.580469", "0.58029544", "0.5801845", "0.5780381", "0.5763335", "0.57618004", "0.57483095", "0.57394207", "0.57389385", "0.57294005", "0.57263696", "0.5719117", "0.5708641", "0.5702544", "0.5695344", "0.5690408", "0.5681478", "0.5672885", "0.5636631", "0.56073064", "0.5605247", "0.5604474", "0.560312", "0.5602961", "0.5592514", "0.5588278", "0.55877286", "0.558123", "0.55766815", "0.5568249", "0.5550012", "0.5542437", "0.5538024", "0.5537243", "0.55210525", "0.55201435", "0.55201435", "0.5499081", "0.5492346", "0.54908377", "0.5490739", "0.54891205", "0.5473868", "0.5473245", "0.5446779", "0.54382944", "0.54378456", "0.5436897", "0.5436599", "0.5432351", "0.5409478", "0.5400842", "0.5399001", "0.53910303", "0.5390627", "0.5386488", "0.53848356", "0.53807217", "0.5378016", "0.5378016", "0.5378016", "0.5378016", "0.5378016" ]
0.0
-1
keys_to_track order is important! Matches will be tested in this order.
def __init__(self, keys_to_track):\n    self.keys_to_track = keys_to_track\n    self.tracker = {}\n    for key_to_track in self.keys_to_track:\n        self.tracker[key_to_track] = {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "def test_matching_tracks(self):\n\n # 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n # 8755 : Satisfied (feat. Miguel & Queen Latifah) by Sia\n # 6699 : Un Besito Mas (feat. Juan Luis Guerra) by Jesse & Joy\n targets = {5037: '2fGFaTDbE8aS4f31fM0XE4',\n 8755: '1ybJ2itxCxPCPkcA9sOgTO',\n 6699: '1182pxG4uNxr3QqIH8b8k0',\n }\n\n matches = {track.i_id: track.id\n for track in self.tracks\n if track.i_id in targets}\n\n for i_id, s_id in targets.iteritems():\n self.assertEqual(s_id, matches[i_id])", "def test_keys(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'},\n 'a': 1,\n 'aa': 2,\n 'abc':3,\n 'hello':4}\n for key in keys_to_set:\n storage.set(key, keys_to_set[key])\n\n pattern_answers = {'?': ['1','2','3','4','a'],\n '*': list(keys_to_set.keys()),\n '[13]': ['1', '3'],\n '[^a]': ['1','2','3','4'],\n '[1-3]': ['1','2','3'],\n '?[ae]*': ['aa', 'hello']}\n for pattern in pattern_answers:\n self.assertEqual(pattern_answers[pattern],\n storage.keys(pattern), f'For pattern \"{pattern}\" expected {pattern_answers[pattern]}.')", "def matches(self):\n pass", "def match(self, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if self.tracker[key_to_track].has_key(match_val):\r\n return self.tracker[key_to_track][match_val]\r\n return None", "def test_keywords(self):\n\n test_cases = (\n makeTestCase('adele 21',\n AlbumResultMatcher(title=Equals('21'), artist=Equals('adele')),\n ArtistResultMatcher(title=Equals('adele'))),\n makeTestCase('kanye power',\n TrackResultMatcher(title=Equals('power', artist=Equals('kanye west'))),\n ArtistResultMatcher(title=Equals('kanye west')),\n AlbumResultMatcher(title=Equals('my beautiful dark twisted fantasy'))),\n makeTestCase('ratat party with children',\n TrackResultMatcher(title=Equals('party with children', artist=Equals('ratatat'))),\n ArtistResultMatcher(title=Equals('ratatat'))),\n makeTestCase('flobot fight with tools handlebars',\n TrackResultMatcher(title=Equals('handlebars')),\n ArtistResultMatcher(title=Equals('flobots')),\n AlbumResultMatcher(title=Equals('fight with tools')))\n )\n\n self._run_tests(tests, {})", "def report_keyset(self):\n for i, matchset in enumerate(self.matches):\n if len(matchset) == 1:\n print \"[%02d]\" % i, fmt(sorted([k for k, data in matchset.items()]), BLUE)\n elif len(matchset) != 0:\n print \"[%02d]\" % i, fmt(sorted([k for k, data in matchset.items()]), WHITE)\n else:\n print \"[%02d]\" % i, fmt(\"[X]\", RED)", "def add(self, obj, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if match_val is None or match_val == '':\r\n pass\r\n else:\r\n self.tracker[key_to_track][match_val] = obj", "def test_ms_track_search(helpers):\n item_from_xml, item_from_dict = common_tests(\n MSTrack,\n MS_TRACK_SEARCH_XML,\n MS_TRACK_SEARCH_DICT,\n \"00020064tracksearch:pilgrim\",\n helpers,\n )\n getter_attributes_test(\n \"artist\", item_from_xml, item_from_dict, 
MS_TRACK_SEARCH_DICT.get(\"artist\")\n )\n getter_attributes_test(\n \"uri\", item_from_xml, item_from_dict, MS_TRACK_SEARCH_DICT[\"uri\"]\n )", "def test_keys_eq(self):\n self.assertListEqual(self.result, self.expected)", "def match_source_key(self, match):\n raise NotImplementedError", "def testTrackDict3(self):\n\n goodTrackDict = {\n \"number\": \"12\", \"uid\": \"301356576\", \"codec_id\": \"S_TEXT/SSA\",\n \"codec_private_length\": \"783\", \"codec_private_data\": \"5b5363726\",\n \"language\": \"slv\", \"track_name\": \"Slovenian\", \"default_track\": \"0\",\n \"forced_track\": \"0\", \"enabled_track\": \"1\"\n }\n\n trackLine = _buildTrackLine(11, 'subtitles', goodTrackDict)\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n goodTrackDict,\n trackDict\n )", "def store_matches(\n self,\n parameter_handler: ParameterHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n # TODO: Should the key_map always be reset, or should we be able to partially\n # update it? Also Note the duplicated code in the child classes\n self.key_map: dict[BondKey, PotentialKey] = dict() # type: ignore[assignment]\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n param = val.parameter_type\n if param.k_bondorder or param.length_bondorder:\n bond = topology.get_bond_between(*key)\n fractional_bond_order = bond.fractional_bond_order\n if not fractional_bond_order:\n assert self._get_uses_interpolation(parameter_handler)\n raise RuntimeError(\n \"Bond orders should already be assigned at this point\",\n )\n else:\n fractional_bond_order = None\n topology_key = BondKey(\n atom_indices=key,\n bond_order=fractional_bond_order,\n )\n\n potential_key = PotentialKey(\n id=val.parameter_type.smirks,\n associated_handler=parameter_handler.TAGNAME,\n bond_order=fractional_bond_order,\n )\n self.key_map[topology_key] = potential_key\n\n valence_terms = self.valence_terms(topology)\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n topology=topology,\n assigned_terms=matches,\n valence_terms=valence_terms,\n )", "def search_key_full_eq(self, key):\n for i in xrange(len(self.keys)):\n flag = 0\n for indx in range(4):\n if cmp(self.keys[i][indx],key[indx]) == 0:\n flag = 0\n continue\n if cmp(key[indx],\"*\") == 0:\n print \" visited internal! ==>\", self.keys[i]\n return self.pointers[i]\n elif self.keys[i][indx] > key[indx]:\n flag = 1\n else:\n flag = 2\n break \n # print \"searching %s:%s:%d\" %(str(self.keys[i]),str(key),flag)\n if flag == 1:\n if i > 0:\n print \" visited internal ==>\", self.keys[i] \n return self.pointers[i]\n else:\n print \" visited internal ==>\", self.keys[0] \n return self.pointers[0]\n elif flag == 0:\n print \" visited internals ==>\", self.keys[i]\n return self.pointers[i]\n print \" visited internalsed ==>\", self.keys[-1] \n return self.pointers[-1]", "def test_audio_features(self):\n\n # 1ehPJRt49h6N0LoryqKZXq, 8737: How Far I'll Go (Alessia Cara Version) by Alessia Cara\n # 2fGFaTDbE8aS4f31fM0XE4, 5037: Pop 101 (feat. 
Anami Vice) by Marianas Trench\n targets = {8737: {'danceability': 0.317,\n 'energy': 0.562,\n 'key': 9,\n 'loudness': -9.609,\n 'mode': 1,\n 'speechiness': 0.395,\n 'acousticness': 0.124,\n 'instrumentalness': 0.000144,\n 'liveness': 0.0667,\n 'valence': 0.127,\n 'tempo': 181.100,\n 'duration_ms': 175507,\n 'time_signature': 4,\n },\n 5037: {'danceability': 0.756,\n 'energy': 0.658,\n 'key': 11,\n 'loudness': -6.128,\n 'mode': 0,\n 'speechiness': 0.202,\n 'acousticness': 0.0581,\n 'instrumentalness': 0,\n 'liveness': 0.0674,\n 'valence': 0.640,\n 'tempo': 120.018,\n 'duration_ms': 247829,\n 'time_signature': 4,\n },\n }\n\n results = {track.i_id: track for track in self.tracks if track.i_id in targets}\n\n for target, expecteds in targets.iteritems():\n result = results[target]\n for key, expected in expecteds.iteritems():\n self.assertEqual(result.__getattr__(key), expected)", "def store_matches(\n self,\n parameter_handler: ImproperTorsionHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n self.key_map = dict()\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n parameter_handler._assert_correct_connectivity(\n val,\n [\n (0, 1),\n (1, 2),\n (1, 3),\n ],\n )\n n_terms = len(val.parameter_type.k)\n for n in range(n_terms):\n smirks = val.parameter_type.smirks\n non_central_indices = [key[0], key[2], key[3]]\n\n for permuted_key in [\n (\n non_central_indices[i],\n non_central_indices[j],\n non_central_indices[k],\n )\n for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]\n ]:\n topology_key = ImproperTorsionKey(\n atom_indices=(key[1], *permuted_key),\n mult=n,\n )\n potential_key = PotentialKey(\n id=smirks,\n mult=n,\n associated_handler=\"ImproperTorsions\",\n )\n self.key_map[topology_key] = potential_key", "def testTrackDict1(self):\n\n goodTrackDict = {\n \"number\": \"1\", \"uid\": \"1493619965\",\n \"codec_id\": \"V_MPEG4/ISO/AVC\", \"codec_private_length\": \"44\",\n \"codec_private_data\": \"014d4028ffe1001c80\", \"language\": \"eng\",\n \"pixel_dimensions\": \"1920x1080\", \"display_dimensions\": \"1920x1080\",\n \"default_track\": \"1\", \"forced_track\": \"0\", \"enabled_track\": \"1\",\n \"packetizer\": \"mpeg4_p10_video\", \"default_duration\": \"41708332\",\n \"content_encoding_algorithms\": \"3\"\n }\n\n trackLine = _buildTrackLine(0, 'video', goodTrackDict)\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n goodTrackDict,\n trackDict\n )", "def test_sample_mapped_keys(self):\r\n\r\n # With num_coverage=1 only the keys will be sampled\r\n actual = sample_mapped_keys(self.test_map, 1)\r\n self.assertEqual(actual, {'1': ['1'], '2': ['2']})\r\n\r\n actual = sample_mapped_keys(self.test_map, 3)\r\n for key in actual.keys():\r\n # check number of sampled keys\r\n self.assertEqual(3, len(actual[key]))\r\n for x in actual[key]:\r\n # check that sampled key is in the full list\r\n correct = list(self.test_map[key])\r\n correct.append(key)\r\n self.assertTrue(x in correct)", "def test_match_ordered(self):\n first = dict(\n a=1,\n b=2,\n )\n\n second = OrderedDict(\n b=2,\n a=1,\n )\n\n check_keys_match_recursive(first, second, [])", "def keysAll():", "def matches(hand):\n return list(sorted(match_iter(hand), reverse=True))", "def _match_tracks(artist, title, mb_tracks):\n # pylint: disable=R0914\n dbg(\"artists is %s\", artist)\n dbg(\"title is %s\", title)\n title_artist_str = c.g + title + c.w, c.g + artist + c.w\n xprint(\"\\nSearching for %s by %s\\n\\n\" % title_artist_str)\n\n def 
dtime(x):\n \"\"\" Format time to M:S. \"\"\"\n return time.strftime('%M:%S', time.gmtime(int(x)))\n\n # do matching\n for track in mb_tracks:\n ttitle = track['title']\n length = track['length']\n xprint(\"Search : %s%s - %s%s - %s\" % (c.y, artist, ttitle, c.w,\n dtime(length)))\n q = \"%s %s\" % (artist, ttitle)\n w = q = ttitle if artist == \"Various Artists\" else q\n query = generate_search_qs(w, 0, result_count=50)\n dbg(query)\n have_results = _search(q, query, splash=False, pre_load=False)\n\n if not have_results:\n xprint(c.r + \"Nothing matched :(\\n\" + c.w)\n continue\n\n results = g.model.songs\n s, score = _best_song_match(results, artist + \" \" + ttitle, length)\n cc = c.g if score > 85 else c.y\n cc = c.r if score < 75 else cc\n xprint(\"Matched: %s%s%s - %s \\n[%sMatch confidence: \"\n \"%s%s]\\n\" % (c.y, s.title, c.w, fmt_time(s.length),\n cc, score, c.w))\n yield s", "def _keys_in_sorted(move):\n return (move.picking_id.id, move.product_id.responsible_id.id)", "def testSortedNotes(self):\n for simple_score in self.simple_scores.values():\n notes = simple_score.sorted_notes\n assert all(notes[i].start_time <= notes[i + 1].start_time\n for i in range(len(notes) - 1))", "def testTrackDict2(self):\n\n goodTrackDict = {\n \"number\": \"2\", \"uid\": \"3442966448\", \"codec_id\": \"A_VORBIS\",\n \"codec_private_length\": \"4412\", \"codec_private_data\": \"020808\",\n \"language\": \"jpn\", \"track_name\": \"2ch\\\\sVorbis\",\n \"default_track\": \"1\", \"forced_track\": \"0\", \"enabled_track\": \"1\",\n \"audio_sampling_frequency\": \"48000\", \"audio_channels\": \"2\"\n }\n\n trackLine = _buildTrackLine(1, 'audio', goodTrackDict)\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n goodTrackDict,\n trackDict\n )", "def keys(targets):", "def check_keys(self):", "def test_match_table_post(self):\n pass", "def getPossibleMatchesList(self):\n return [p for p in self._patterns if p.startswith(self._keyCode)]", "def store_matches(\n self,\n parameter_handler: ProperTorsionHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n self.key_map: dict[ProperTorsionKey, PotentialKey] = dict() # type: ignore[assignment]\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n param = val.parameter_type\n n_terms = len(val.parameter_type.phase)\n for n in range(n_terms):\n smirks = param.smirks\n if param.k_bondorder:\n # The relevant bond order is that of the _central_ bond in the torsion\n bond = topology.get_bond_between(key[1], key[2])\n fractional_bond_order = bond.fractional_bond_order\n if not fractional_bond_order:\n raise RuntimeError(\n \"Bond orders should already be assigned at this point\",\n )\n else:\n fractional_bond_order = None\n topology_key = ProperTorsionKey(\n atom_indices=key,\n mult=n,\n bond_order=fractional_bond_order,\n )\n potential_key = PotentialKey(\n id=smirks,\n mult=n,\n associated_handler=\"ProperTorsions\",\n bond_order=fractional_bond_order,\n )\n self.key_map[topology_key] = potential_key\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n topology=topology,\n assigned_terms=matches,\n valence_terms=list(topology.propers),\n )", "def _fe_keyword_match(self, sample):\n result = OrderedDict()\n\n for item in self._keywords:\n result[item + \"_kw\"] = 1 if item in sample['fqdn'] else 0\n\n return result", "def store_matches(\n self,\n parameter_handler: ParameterHandler,\n topology: \"Topology\",\n ) -> None:\n if self.key_map:\n # TODO: Should the key_map 
always be reset, or should we be able to partially\n # update it? Also Note the duplicated code in the child classes\n self.key_map: dict[\n Union[TopologyKey, LibraryChargeTopologyKey],\n PotentialKey,\n ] = dict()\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n topology_key = TopologyKey(atom_indices=key)\n potential_key = PotentialKey(\n id=val.parameter_type.smirks,\n associated_handler=parameter_handler.TAGNAME,\n )\n self.key_map[topology_key] = potential_key\n\n if self.__class__.__name__ in [\n \"SMIRNOFFBondCollection\",\n \"SMIRNOFFAngleCollection\",\n ]:\n valence_terms = self.valence_terms(topology)\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n assigned_terms=matches,\n topology=topology,\n valence_terms=valence_terms,\n )", "def handleMatch(self, m):\r\n pass", "def test_fuzz_insertions(self):\n key_range = 2 ** 64\n value_range = 1024\n key_set = set()\n \n d = SplayDict()\n for value in range(0, value_range):\n key = randint(0, key_range)\n d.put(key, value)\n key_set.add(key)\n \n keys = list(d.keys())\n self.assertEqual(len(keys), len(key_set), \"Length should reflect number of items inserted.\")\n self.assertEqual( len(keys), len(list(keys)), \"Iteration should find all items in tree.\")", "def global_matches(self, visualise=False):\n kp = self._common_keypoints(*self.views)\n if visualise:\n self.show()\n logger.debug(f'{len(kp)} common keypoints found')\n return kp", "def test_prefilter_exact_matches(self):\r\n seqs = [('s1 comment1', 'ACCTTGTTACTTT'), # three copies\r\n ('s2 comment2', 'ACCTTGTTACTTTC'), # one copy\r\n ('s3 comment3', 'ACCTTGTTACTTTCC'), # two copies\r\n ('s4 comment4', 'ACCTTGTTACTTT'),\r\n ('s5 comment5', 'ACCTTGTTACTTTCC'),\r\n ('s6 comment6', 'ACCTTGTTACTTT')]\r\n expected0 = [('QiimeExactMatch.s1', 'ACCTTGTTACTTT'),\r\n ('QiimeExactMatch.s2', 'ACCTTGTTACTTTC'),\r\n ('QiimeExactMatch.s3', 'ACCTTGTTACTTTCC')]\r\n expected1 = {'QiimeExactMatch.s1': ['s1', 's4', 's6'],\r\n 'QiimeExactMatch.s2': ['s2'],\r\n 'QiimeExactMatch.s3': ['s3', 's5']}\r\n expected = (expected0, expected1)\r\n p = OtuPicker({})\r\n actual = p._prefilter_exact_matches(seqs)\r\n self.assertEqual(actual, expected)", "def assertKeys(self, data, expected):\r\n self.assertEqual(sorted(data.keys()), sorted(expected))", "def on_match_start(self, *args, **kwargs):\n self._match = list()", "def match(self, item):", "def exact_match(self, Flow):\n for event in self._events:\n event.match_triad_chord(Flow)", "def test_search_key_phrase(self):\n # search via key phrase.\n test = self.data.search(key_phrase='testing entries.', all_names=True)\n self.assertIn('testing entries.', test[0].notes)", "def test_fromkeys(self):\n d = SplayDict.fromkeys(['a', 'b', 'c'], 1)\n self.assertIn('a' , d)\n self.assertIn('b' , d)\n self.assertIn('c' , d)\n self.assertEqual(d['a'] , 1)\n self.assertEqual(d['b'] , 1)\n self.assertEqual(d['c'] , 1)", "def test_post_chain_search(self):\n pass", "def sort_keys( self, results, ignore_cache=False ):\n if self.sorted_keys != None and (not ignore_cache):\n return self.sorted_keys\n mykeys = list( results.keys() ); mykeys.sort()\n self.sorted_keys = mykeys\n return mykeys", "def _in_keys(self, key, keys):\n # sorting required for comparison\n key.sort()\n return key in keys", "def getMatchIds(self):\n return sorted(self._matches.keys())", "def current_source_key(self, match):\n raise NotImplementedError", "def matchTracker(category):\n settings = settingsLoader()\n with open(torrentFileName, 'r') as 
TF:\n trackerInfo = TF.readline().split()[0]\n logging.debug(\"SORT: matchTracker: %s\" % trackerInfo)\n trackerList = (settings['categoriesDictSettings']\n [category]\n ['matches']\n ['matchTracker'])\n logging.debug(\"SORT: matchTracker: %s\" % trackerList)\n for EachTracker in trackerList:\n logging.debug(\"SORT:matchTracker: %s\" % EachTracker)\n if EachTracker in trackerInfo:\n return True\n return False", "def set_cur_tracked_neighbors(self, match_det_track):\n neighbor_det_ids = self.cur_neighbor['det_ids']\n\n if neighbor_det_ids is not None:\n tracked_track_ids = []\n tracked_relative_pos = []\n for idx in range(len(neighbor_det_ids)):\n det_id = neighbor_det_ids[idx]\n if det_id in match_det_track.keys():\n tracked_track_ids.append(match_det_track[det_id])\n tracked_relative_pos.append(self.cur_neighbor['relative_pos'][idx])\n self.cur_neighbor['tracked_track_ids'] = tracked_track_ids\n self.cur_neighbor['tracked_relative_pos'] = tracked_relative_pos\n else:\n self.cur_neighbor['tracked_track_ids'] = None\n self.cur_neighbor['tracked_relative_pos'] = None", "def keysWhichMatch(cls, *args):\n if len(cls.keys) < len(args) > 0:\n raise ValueError('Number of keys provided is too long.\\n'\n 'Len Class Keys: %s\\n'\n 'Len Provided Keys: %s\\n' % (len(cls.keys), len(args)))\n\n index = 0\n output = cls.db_key_tuples()\n\n for keyToCheck in args:\n temp = []\n for key in output:\n if key[index] == keyToCheck:\n temp.append(key)\n\n index += 1\n output = temp\n\n return output", "def test_cmp_to_key(self):\n def compare_pokemon(a, b):\n # ``a`` and ``b`` are tuples of ``(key, class)``.\n return (\n (a[1].popularity < b[1].popularity)\n - (a[1].popularity > b[1].popularity)\n )\n\n registry =\\\n SortedClassRegistry(\n attr_name = 'element',\n sort_key = cmp_to_key(compare_pokemon),\n )\n\n @registry.register\n class Onix(Pokemon):\n element = 'rock'\n popularity = 50\n\n @registry.register\n class Cubone(Pokemon):\n element = 'water'\n popularity = 100\n\n @registry.register\n class Exeggcute(Pokemon):\n element = 'grass'\n popularity = 10\n\n # The registry iterates over registered classes in descending\n # order by ``popularity``.\n self.assertListEqual(\n list(registry.values()),\n [Cubone, Onix, Exeggcute],\n )", "def _match(self) -> None:\n self.matched = [i for i in self.data if self.match(i)]\n self.unmatched = [i for i in self.data if not self.match(i)]", "def match_track_spotify(\n track: Track,\n access_token: str,\n match_title=True,\n match_album=True,\n match_artist=True,\n *match_custom\n) -> bool:\n # Make sure all the custom attributes are valid\n for req in match_custom:\n if not hasattr(track, req):\n raise AttributeError\n spotify_results = spotify_track_search(\n \"{} {}\".format(track.title, track.artist)\n if track.artist != UNKNOWN_ARTIST\n else track.title,\n access_token,\n )\n if \"error\" in spotify_results:\n print(\"error {} {}\".format(spotify_results[\"status\"], spotify_results[\"error\"]))\n return False\n for strack in spotify_results:\n if match_title and strack[\"name\"] != track.title:\n continue\n if match_artist and strack[\"artists\"][0][\"name\"] != track.artist:\n continue\n if match_album and strack[\"album\"][\"name\"] != track.album:\n continue\n reqs_matched = False if match_custom else True\n for req in match_custom:\n if req not in strack:\n raise AttributeError\n if strack[req] != getattr(track, req):\n reqs_matched = False\n break\n if not reqs_matched:\n continue\n track.spotify_id = strack[\"id\"]\n track.save()\n return True\n return 
False", "def test_sorted(self):\n keys = list(set([randint(0, 2^64) for i in range(0, 128)]))\n items = [(key, None) for key in keys]\n d = SplayDict(items)\n self.assertEquals(len(keys), len(d))\n self.assertEqual(len(keys) , len(list(d)))\n self.assertEqual(list(sorted(keys)) , list(d.keys()))", "def find(self, image, k=None, ratio=None):\n if not self._targets:\n return []\n k = 2 if k is None else k\n ratio = 0.75 if ratio is None else ratio\n keypoints, descriptors = self._detector.detectAndCompute(image, None)\n if len(keypoints) < self.min_match_count:\n return []\n matches = self._matcher.knnMatch(descriptors, k=int(k))\n matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]\n if len(matches) < self.min_match_count:\n return []\n matches_by_id = [[] for _ in xrange(len(self._targets))]\n for m in matches:\n matches_by_id[m.imgIdx].append(m)\n tracked = []\n for imgIdx, matches in enumerate(matches_by_id):\n if len(matches) < self.min_match_count:\n continue\n target = self._targets[imgIdx]\n p0 = [target.keypoints[m.trainIdx].pt for m in matches]\n p1 = [keypoints[m.queryIdx].pt for m in matches]\n p0, p1 = np.float32((p0, p1))\n H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)\n status = status.ravel() != 0\n if status.sum() < self.min_match_count:\n continue\n p0, p1 = np.int32((p0, p1))\n inliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if s]\n outliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if not s]\n quad = cv2.perspectiveTransform(target.quad.reshape(1, -1, 2), H).reshape(-1, 2)\n track = TrackedTarget(target=target, image=image, inliers=inliers, outliers=outliers, H=H, quad=quad)\n tracked.append(track)\n tracked.sort(key = lambda t: len(t.inliers), reverse=True)\n return tracked", "def test_token_matching_respects_order() -> None:\n nlp = spacy.blank(\"en\")\n if spacy.__version__ < \"3.0.0\":\n ruler = SpaczzRuler(nlp)\n nlp.add_pipe(ruler, first=True)\n else:\n ruler = nlp.add_pipe(\"spaczz_ruler\", first=True)\n ruler.add_patterns(\n [\n {\n \"label\": \"COMPANY\",\n \"pattern\": [\n {\"IS_UPPER\": True, \"OP\": \"+\"},\n {\"IS_PUNCT\": True, \"OP\": \"?\"},\n {\"TEXT\": {\"REGEX\": r\"S\\.\\s?[A-Z]\\.?\\s?[A-Z]?\\.?\"}},\n {\"IS_PUNCT\": True, \"OP\": \"?\"},\n ],\n \"type\": \"token\",\n \"id\": \"COMPANY SL\",\n }\n ]\n )\n doc = nlp(\"My company is called LARGO AND MARMG S.L.\")\n assert doc.ents[0].text == \"LARGO AND MARMG S.L.\"", "def check_exclusive_correct_top_level_keys_loaded_in_redis(tester: TestCase, expected_areas):\n expected_keys = []\n for area_name in expected_areas:\n # we expect each of the keys below for each area\n expected_keys.append(\"%s%s\" % (cities_boundaries_template_key, area_name))\n expected_keys.append(\"%s%s\" % (cities_places_template_key, area_name))\n expected_keys.append(\"%s%s\" % (cities_coordinates_template_key, area_name))\n\n tester.assertEqual(set(expected_keys), set(r.keys(\"*\")))", "def keys():", "def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n 
isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def test_track(self):\r\n track = FedexTrackRequest(CONFIG_OBJ)\r\n track.TrackPackageIdentifier.Type = 'TRACKING_NUMBER_OR_DOORTAG'\r\n track.TrackPackageIdentifier.Value = '798114182456'\r\n track.send_request()\r\n \r\n for match in track.response.TrackDetails:\r\n # This should be the same tracking number on the response that we\r\n # asked for in the request.\r\n self.assertEqual(match.TrackingNumber, tracking_num)", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def test_get_top_step_matches(self):\n\n results = GenomePropertiesResultsWithMatches(*self.test_genome_property_results, properties_tree=self.test_tree)\n\n self.assertEqual(len(results._top_step_matches), 2)\n\n cad3_top_matches = results.get_sample_matches('C_chlorochromatii_CaD3', top=True)\n dsm_273_top_matches = results.get_sample_matches('C_luteolum_DSM_273', top=True)\n\n self.assertEqual(len(cad3_top_matches), 1)\n self.assertEqual(len(dsm_273_top_matches), 1)\n\n self.assertEqual(cad3_top_matches['Protein_Accession'].tolist()[0], 'NC_007514.1_940')\n self.assertEqual(dsm_273_top_matches['Protein_Accession'].tolist()[0], 'NC_007512.1_1088')", "def illustrate_matching(self):\n work_dir = self.work_dir\n draw_keys_oriented(work_dir+'matching_keys_im0.txt',\n work_dir+'input_0.orig.png',\n work_dir+'matching_keys_im0.png')\n draw_keys_oriented(work_dir+'matching_keys_im1.txt',\n work_dir+'input_1.orig.png',\n work_dir+'matching_keys_im1.png')\n draw_matches(work_dir+'matches.txt',\n work_dir+'input_0.orig.png',\n work_dir+'input_1.orig.png',\n work_dir+'OUTmatches.png')\n return 1", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def test_get_matches(self):\n\n results = GenomePropertiesResultsWithMatches(*self.test_genome_property_results, properties_tree=self.test_tree)\n\n self.assertEqual(len(\n results.step_matches.reset_index()[['Property_Identifier', 'Step_Number']].drop_duplicates()), 1)\n self.assertEqual(len(results.get_sample_matches('C_chlorochromatii_CaD3')), 5)\n self.assertEqual(len(results.get_sample_matches('C_luteolum_DSM_273')), 4)\n\n self.assertEqual(results.get_sample_matches('Your moms house'), None)", "def match_candidates(self):\n for event in self._events:\n event.match_candidates()", "def on_matching_rules(self, matching_rules):\n pass", "def altloc_match(self, other: AtomKey) -> bool:\n ...", "def determine_keywords(self):\n\n split = dict()\n split['email_cc'] = re.compile(\"^\\s*CC[-_]?MAIL[:=]\\s*(.*)\")\n split['email_cc2'] = re.compile(\"^\\s*C[Cc][:=]\\s*(.*)\")\n split['fixed_in'] = re.compile(\"^\\s*FIXED[-_]?IN[:=]\\s*(.*)\")\n\n numeric = dict()\n numeric['bug_fixed'] = re.compile(\"^\\s*(?:BUGS?|FEATURE)[:=]\\s*(.+)\")\n numeric['bug_cc'] = re.compile(\"^\\s*CCBUGS?[:=]\\s*(.+)\")\n\n presence = dict()\n presence['email_gui'] = re.compile(\"^\\s*GUI:\")\n presence['silent'] = re.compile(\"(?:CVS|SVN|GIT|SCM).?SILENT\")\n presence['notes'] = re.compile(\"(?:Notes added by 'git notes add'|Notes removed by 'git notes 
remove')\")\n\n results = defaultdict(list)\n for line in self.commit.message.split(\"\\n\"):\n # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off\n # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them\n line = re.sub(\"^Summary: (.+)\", \"\\g<1>\", line)\n\n # Start processing our keywords...\n for (name, regex) in split.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += [result.strip() for result in match.group(1).split(\",\")]\n\n for (name, regex) in numeric.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += re.findall(\"(\\d{1,10})\", match.group(1))\n\n for (name, regex) in presence.iteritems():\n if re.match( regex, line ):\n results[name] = True\n\n self.keywords = results", "def test_tracking_context(self):\n self.assert_expected_token_value({'foo': 'bar'})", "def test_find_phrase_matches3(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tobj_ut = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tself.assertEqual(dict(obj_ut),\n\t\t\t{'not good': [[2, -1, 0]], 'not very good': [[4, -1, 0]]})", "def Keys(self) -> _n_1_t_4:", "def recache_matches(self):\n matches = (Match.objects\n .filter(Q(winner = self) | Q(loser = self))\n .order_by('-played_time'))[:CACHED_RATING_LIMIT]\n matches = list(matches)\n matches.reverse()\n\n for match in matches:\n self.add_match(match, include_rank=True)\n\n self.save()", "def match(self, key):\n position = key.index(RANGE) # which index to skip\n\n def predicate(keys_0, keys_1):\n \"\"\"whether all other indices match search key\"\"\"\n num_matching = 0\n for i, (k_0, k_1) in enumerate(zip(keys_0, keys_1)):\n if i != position and k_0 == k_1:\n num_matching += 1\n return num_matching == len(key) - 1\n\n # all variables\n keys = list(self.variables.keys())\n # only those which match, including any from the RANGE index\n keys = [k for k in keys if predicate(k, key)]\n # sort along the RANGE index\n keys.sort(key=lambda k: k[position])\n\n return [self.variables[k] for k in keys]", "def test_token_function(self):\r\n assert TokenTestModel.objects().count() == 0\r\n for i in range(10):\r\n TokenTestModel.create(key=i, val=i)\r\n assert TokenTestModel.objects().count() == 10\r\n seen_keys = set()\r\n last_token = None\r\n for instance in TokenTestModel.objects().limit(5):\r\n last_token = instance.key\r\n seen_keys.add(last_token)\r\n assert len(seen_keys) == 5\r\n for instance in TokenTestModel.objects(pk__token__gt=functions.Token(last_token)):\r\n seen_keys.add(instance.key)\r\n\r\n assert len(seen_keys) == 10\r\n assert all([i in seen_keys for i in range(10)])", "def matches(self, matches):\n\n self._matches = matches", "def matches(self, matches):\n\n self._matches = matches", "def testMatch2(self):\n self.inv._literals_filter['fruit'] = ['pear', 'apple']\n self.assertFalse(self.inv._Match('fruit', []))\n self.assertFalse(self.inv._Match('fruit', ['grape', 'orange']))\n self.assertTrue(self.inv._Match('fruit', ['grape', 'apple']))\n self.assertTrue(self.inv._Match('fruit', [['grape'], ['orange', 'apple']]))", "def subStringMatchExact(target,key):\r\n index = find(target,key)\r\n #print 'here',target,key,index\r\n if index < 0 or len(key) <= 0 or len(target) <= 0:\r\n return ()\r\n matches = subStringMatchExact(target[index+len(key):len(target)],key)\r\n offset = index + len(key)\r\n temp_matches = ()\r\n #print matches\r\n if matches:\r\n for x in range(0, 
len(matches)) :\r\n temp_matches += ((matches[x] + offset),)\r\n #matches.insert(0,index)\r\n temp_matches = (index,) + temp_matches\r\n return temp_matches", "def get_known_words(label_matches):\r\n\tknown_words = set()\r\n\tfor key in label_matches.keys():\r\n\t\tif key[0] not in known_words:\r\n\t\t\tknown_words.add(key[0])\r\n\treturn known_words", "def top_matches(self, prefs, p1):\n #print 'top_matches', prefs, p1\n #print '\\n'\n return [(p2, self.similarity(prefs[p1], prefs[p2])) for p2 in prefs if p2 != p1]", "def keys(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Expression]:", "def match_tracks(\n self,\n others,\n subset=None,\n method=\"simple\",\n interpolate_to=\"other\",\n thresh_dist=250.0,\n time_frac=0.5,\n return_dist_matrix=False,\n beta=100.0,\n r_planet=EARTH_RADIUS,\n ):\n # Recursive call for each of the available categies\n if subset is None:\n if self.is_categorised:\n result = {}\n for subset_key in self.cat_labels:\n result[subset_key] = self.match_tracks(\n others,\n subset=subset_key,\n method=method,\n interpolate_to=interpolate_to,\n thresh_dist=thresh_dist,\n time_frac=time_frac,\n return_dist_matrix=return_dist_matrix,\n beta=beta,\n r_planet=r_planet,\n )\n return result\n else:\n subset = \"all\"\n\n # Select subset\n sub_gb = self[subset].gb\n if len(sub_gb) == 0 or len(others) == 0:\n return []\n if isinstance(others, list):\n # match against a list of DataFrames of tracks\n other_gb = pd.concat(\n [OctantTrack.from_df(df) for df in others],\n keys=range(len(others)),\n names=self._mux_names,\n ).gb\n elif isinstance(others, self.__class__):\n # match against another TrackRun\n other_gb = others[subset].gb\n else:\n raise ArgumentError('Argument \"others\" ' f\"has a wrong type: {type(others)}\")\n match_pairs = []\n if method == \"intersection\":\n for idx, ot in self._pbar(sub_gb): # , desc=\"self tracks\"):\n for other_idx, other_ot in self._pbar(other_gb, leave=False):\n times = other_ot.time.values\n time_match_thresh = time_frac * (times[-1] - times[0]) / HOUR\n\n intersect = pd.merge(other_ot, ot, how=\"inner\", left_on=\"time\", right_on=\"time\")\n n_match_times = intersect.shape[0]\n if n_match_times > 0:\n _tstep_h = intersect.time.diff().values[-1] / HOUR\n dist = intersect[[\"lon_x\", \"lon_y\", \"lat_x\", \"lat_y\"]].apply(\n lambda x: great_circle(*x.values, r_planet=r_planet), axis=1\n )\n prox_time = (dist < (thresh_dist * KM2M)).sum() * _tstep_h\n if (\n n_match_times * _tstep_h > time_match_thresh\n ) and prox_time > time_match_thresh:\n match_pairs.append((idx, other_idx))\n break\n\n elif method == \"simple\":\n # TODO: explain\n ll = [\"lon\", \"lat\"]\n match_pairs = []\n for other_idx, other_ct in self._pbar(other_gb): # , desc=\"other tracks\"):\n candidates = []\n for idx, ct in self._pbar(sub_gb, leave=False): # , desc=\"self tracks\"):\n if interpolate_to == \"other\":\n df1, df2 = ct.copy(), other_ct\n elif interpolate_to == \"self\":\n df1, df2 = other_ct, ct.copy()\n l_start = max(df1.time.values[0], df2.time.values[0])\n e_end = min(df1.time.values[-1], df2.time.values[-1])\n if (e_end - l_start) / HOUR > 0:\n # df1 = df1.set_index('time')[ll]\n # ts = pd.Series(index=df2.time)\n # new_df1 = (pd.concat([df1, ts]).sort_index()\n # .interpolate(method='values')\n # .loc[ts.index])[ll]\n tmp_df2 = pd.DataFrame(\n data={\"lon\": np.nan, \"lat\": np.nan, \"time\": df2.time}, index=df2.index\n )\n new_df1 = (\n pd.concat([df1[[*ll, \"time\"]], tmp_df2], ignore_index=True, 
keys=\"time\")\n .set_index(\"time\")\n .sort_index()\n .interpolate(method=\"values\")\n .loc[tmp_df2.time]\n )[ll]\n new_df1 = new_df1[~new_df1.lon.isnull()]\n\n # thr = (time_frac * 0.5\n # * (df2.time.values[-1] - df2.time.values[0]\n # + df1.time.values[-1] - df2.time.values[0]))\n thr = time_frac * df2.shape[0]\n dist_diff = np.full(new_df1.shape[0], FILLVAL)\n for i, ((x1, y1), (x2, y2)) in enumerate(\n zip(new_df1[ll].values, df2[ll].values)\n ):\n dist_diff[i] = great_circle(x1, x2, y1, y2, r_planet=r_planet)\n within_r_idx = dist_diff < (thresh_dist * KM2M)\n # if within_r_idx.any():\n # if (new_df1[within_r_idx].index[-1]\n # - new_df1[within_r_idx].index[0]) > thr:\n # candidates.append((idx, within_r_idx.sum()))\n if within_r_idx.sum() > thr:\n candidates.append((idx, within_r_idx.sum()))\n if len(candidates) > 0:\n candidates = sorted(candidates, key=lambda x: x[1])\n final_idx = candidates[-1][0]\n match_pairs.append((final_idx, other_idx))\n\n elif method == \"bs2000\":\n # sub_list = [i[0] for i in list(sub_gb)]\n sub_indices = list(sub_gb.indices.keys())\n other_indices = list(other_gb.indices.keys())\n dist_matrix = np.full((len(sub_gb), len(other_gb)), FILLVAL)\n for i, (_, ct) in enumerate(self._pbar(sub_gb, leave=False)): # , desc=\"self tracks\"):\n x1, y1, t1 = ct.coord_view\n for j, (_, other_ct) in enumerate(self._pbar(other_gb, leave=False)):\n x2, y2, t2 = other_ct.coord_view\n dist_matrix[i, j] = distance_metric(\n x1, y1, t1, x2, y2, t2, beta=float(beta), r_planet=r_planet\n )\n for i, idx1 in enumerate(np.nanargmin(dist_matrix, axis=0)):\n for j, idx2 in enumerate(np.nanargmin(dist_matrix, axis=1)):\n if i == idx2 and j == idx1:\n match_pairs.append((sub_indices[idx1], other_indices[idx2]))\n if return_dist_matrix:\n return match_pairs, dist_matrix\n else:\n raise ArgumentError(f\"Unknown method: {method}\")\n\n return match_pairs", "def test_compare_keys(self):\n dict1 = {\"a\":1 , \"b\":2 , \"c\":3}\n dict2 = {\"b\":1 ,\"a\":2 , \"c\":3}\n dict3 = {\"b\":1 ,\"d\":2 , \"c\":3}\n self.assertEqual(True, comparator.compare_keys(dict1, dict2))\n self.assertEqual(False, comparator.compare_keys(dict2, dict3))", "def _matchUp(token, labels):\n ret = []\n\n # strip parens from the token, since they often appear in the\n # display_name, but are removed from the comment.\n token = utils.normalizeToken(token)\n decimalToken = _parseNumbers(token)\n\n # Iterate through the labels in descending order of label importance.\n for label_key in ['name', 'unit', 'qty', 'comment', 'range_end']:\n label_value = labels[label_key]\n if isinstance(label_value, basestring):\n for n, vt in enumerate(tokenizer.tokenize(label_value)):\n if utils.normalizeToken(vt).decode('utf-8') == token:\n ret.append(label_key.upper())\n\n elif decimalToken is not None:\n if label_value == decimalToken:\n ret.append(label_key.upper())\n\n return ret", "def test_get_igv_tracks():\n\n # GIVEN an app with public cloud tracks initialized\n patched_track = {\"37\": [{\"name\": \"test track\"}]}\n cloud_tracks.public_tracks = patched_track\n\n # THEN the get_igv_tracks controller should return the default tracks\n igv_tracks = get_igv_tracks()\n for track in IGV_TRACKS[\"37\"]:\n assert track[\"name\"] in igv_tracks\n\n # and the name of the public cloud track\n assert \"test track\" in igv_tracks", "def get_similar_tracks_for_original_track(track_svc, original_track):\n similar_tracks: Dict[str, StreamingServiceTrack] = {}\n for svc in SUPPORTED_STREAMING_SERVICES:\n if svc is track_svc:\n continue\n\n 
track = None\n with svc() as svc_client:\n try:\n track = svc_client.search_one_track(\n original_track.searchable_name\n )\n except Exception:\n log.error(\"Searching one track\", exc_info=True)\n\n if track:\n if tracks_are_similar(original_track, track):\n similar_tracks[svc.__name__] = track\n else:\n similar_tracks[svc.__name__] = None\n log.warning(\n f'Track title \"{track.searchable_name}\" for '\n f\"svc {svc.__name__} is not similar enough to \"\n f'\"{original_track.searchable_name}\".'\n )\n else:\n similar_tracks[svc.__name__] = None\n\n return similar_tracks", "def test_get_filter_results_location_includes(self):\r\n\r\n data_file = open('data.txt', 'r')\r\n twitter_dict = tf.process_data(data_file)\r\n data_file.close()\r\n\r\n actual = tf.get_filter_results(twitter_dict, ['tomCruise', \\\r\n 'PerezHilton', 'tomfan'], {'follower': 'katieH'})\r\n expected = ['tomCruise', 'PerezHilton']\r\n self.assertEqual(actual, expected)", "def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n holder = []\n for pair in self.allPairs:\n if asset.lower() in pair:\n holder.append(pair)\n pairsByTickers[asset] = holder\n return pairsByTickers", "def assert_keys_match(keys, expected, allow_missing=True):\n if not allow_missing:\n missing = expected - keys\n assert not missing, 'missing keys: %s' % missing\n extra = keys - expected\n assert not extra, 'extraneous keys: %s' % extra", "def test_fuzzy_matching_multi_match(\n nlp: Language, countries: List[Dict[str, Any]]\n) -> None:\n ruler = SpaczzRuler(nlp, spaczz_fuzzy_defaults={\"min_r2\": 85})\n ruler.add_patterns(countries)\n doc = nlp(\"This is a test that should find Northern Ireland and Ireland\")\n doc = ruler(doc)\n matches = [(ent.ent_id_, ent.text) for ent in doc.ents if ent.label_ == \"COUNTRY\"]\n assert matches == [(\"Northern Ireland\", \"Northern Ireland\"), (\"Ireland\", \"Ireland\")]", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)", "def split_by_home(matches, team_id):\n\n sorted_matches = {\n \"home\": [],\n \"away\": []\n }\n\n for match_id, match in matches.items():\n if match.hometeam.team_id == team_id:\n sorted_matches[\"home\"].append(match_id)\n elif match.awayteam.team_id == team_id:\n sorted_matches[\"away\"].append(match_id)\n\n return sorted_matches", "def step040():\n logger.logMessage('Begin: matching work files')\n sKey = ''\n mKey = ''\n def readFile(f):\n line = f.readline().rstrip()\n if line == '':\n key = 'ZZZZZZZZZZZZZZZZZZZZZZZZZ'\n return None,key\n else:\n sp = line.split(';')\n key = '{0:25s}'.format(sp[1])[0:19]\n return sp,key\n\n m = open(dbDumpFile,'r')\n s = open(sortedCandidatesFile,'r')\n numrecs = 0\n with open(matchFile,'w') as match:\n mFields,mKey = readFile(m)\n sFields,sKey = readFile(s)\n while mFields != None or sFields != None:\n if sKey == mKey:\n match.write('{0:014d};{1:25s};{2:32s};{3:31s}\\n'.format(int(mFields[0]),mKey,sFields[2],sFields[3]))\n 
numrecs += 1\n if numrecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} records matched\".format(numrecs))\n sFields,sKey = readFile(s)\n mFields,mKey = readFile(m)\n elif sKey < mKey:\n sFields,sKey = readFile(s)\n else:\n logger.logMessage(level='WARNING',message='Record not matched: {0}'.format(mFields))\n mFields,mKey = readFile(m)\n logger.logMessage(\"Total matched: {0:d}\".format(numrecs))\n\n m.close()\n s.close()\n logger.logMessage('End : matching work files')", "def add_match(self, event):\n event = copy.deepcopy(event)\n # Convert datetime's back to timestamps\n ts = self.rules.get(\"timestamp_field\")\n if ts in event:\n event[ts] = dt_to_ts(event[ts])\n\n self.matches.append(event)", "def match_api_keys(key, ip):", "def buffers_with_matches(self):\n _set = set(self.misc.buffers()) & set(self.matches.keys())\n _set.add(self.curr_buf.number)\n return list(_set)", "def _add_to_recently_called(self, match, reporter):\n if utils.istrcmp(match.player1_tag, reporter):\n other = match.player2_tag\n else:\n other = match.player1_tag\n self.recently_called[other] = time()" ]
[ "0.72052854", "0.6073225", "0.5684413", "0.5646537", "0.55177075", "0.54479295", "0.54022795", "0.5286637", "0.5255114", "0.52513975", "0.5236649", "0.52352864", "0.5173165", "0.5170619", "0.5168231", "0.51618785", "0.51518595", "0.51467997", "0.5143868", "0.51270205", "0.5095624", "0.5080584", "0.5073778", "0.5068419", "0.5047828", "0.50372577", "0.5029652", "0.5017005", "0.5014709", "0.5010062", "0.49436924", "0.49363533", "0.4930932", "0.49299142", "0.4912932", "0.49055326", "0.49040818", "0.49001014", "0.4899912", "0.48959452", "0.4891762", "0.4877048", "0.48511842", "0.48500744", "0.4846642", "0.48455954", "0.48385313", "0.48181736", "0.4817294", "0.48079506", "0.48051062", "0.47849765", "0.47821406", "0.47773936", "0.4773901", "0.4763567", "0.47580227", "0.4756838", "0.47409868", "0.47269493", "0.4724394", "0.47153378", "0.4712549", "0.47106168", "0.470731", "0.47038347", "0.47031593", "0.4697062", "0.4693925", "0.4690416", "0.4685003", "0.46813932", "0.46786878", "0.4663002", "0.46624523", "0.46567234", "0.46567234", "0.46456993", "0.46456322", "0.4644703", "0.46441078", "0.46425253", "0.46395323", "0.46254978", "0.46200016", "0.46120065", "0.46096689", "0.46073213", "0.46033794", "0.45916963", "0.45887607", "0.4585684", "0.4585684", "0.4582933", "0.4581277", "0.45746195", "0.45712337", "0.45690817", "0.45671412", "0.45652816" ]
0.6227405
1
Add obj as a match for match_dict values. Checks to make sure match_dict keys are valid.
def add(self, obj, match_dict):
    for match_key in match_dict.keys():
        assert match_key in self.keys_to_track

    for key_to_track in self.keys_to_track:
        if match_dict.has_key(key_to_track):
            match_val = match_dict[key_to_track]
            if match_val is None or match_val == '':
                pass
            else:
                self.tracker[key_to_track][match_val] = obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "def add_match(self, event):\n event = copy.deepcopy(event)\n # Convert datetime's back to timestamps\n ts = self.rules.get(\"timestamp_field\")\n if ts in event:\n event[ts] = dt_to_ts(event[ts])\n\n self.matches.append(event)", "def addMatch(self, id, match):\n self._matches[id] = match", "def add_matching(self, matching: list):\n self.matching = matching", "def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]", "def add_match(self, match):\n self.matches.append(match)", "def match(self, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if self.tracker[key_to_track].has_key(match_val):\r\n return self.tracker[key_to_track][match_val]\r\n return None", "def match(self, data_instance: Dict[str, Any]) -> bool:", "def add_match(self, match):\n\n # update cache results with game result\n if self.cached_results:\n results = json.loads(self.cached_results)\n else:\n results = []\n\n winner = match.winner == self\n opponent = match.loser if winner else match.winner\n new_rating = match.winner_rating_after if winner else \\\n match.loser_rating_after\n\n results.append({\n 'winner': winner,\n 'opponent_name': opponent.name,\n 'played_time': str(match.played_time),\n 'played_timestamp': get_timestamp(match.played_time)\n })\n self.cached_results = json.dumps(results[-CACHED_RATING_LIMIT:])\n\n # update player with new rating\n self.update_rating(new_rating, match)\n\n # save the player in the database\n self.save()", "def __addMatch(db, match, oldRatings, newRatings):\n c = db.cursor()\n player1EloChange = newRatings[0] - oldRatings[0]\n player2EloChange = newRatings[1] - oldRatings[1]\n\n player1 = __playerCache[match['player1-id']]\n player2 = __playerCache[match['player2-id']]\n winner = __playerCache[match['winner-id']]\n\n c.execute(\"INSERT INTO matches \"\n \"VALUES(%s,%s,'%s','%s','%s','%s',%s,%s,%s,%s)\" %\n (match['id'], match['tournament-id'],\n match['updated-at'],\n player1['email-hash'], player2['email-hash'],\n winner['email-hash'],\n oldRatings[0], oldRatings[1],\n player1EloChange, player2EloChange))", "def add_match(self, f, exclusions=None, **match_kwargs):\n assert not self._checked, 'can\\'t add after matchlist has been checked'\n\n if not match_kwargs: # Do nothing if no match_kwargs.\n return f\n\n self._verify_match_kwargs(match_kwargs, exclusions)\n self.matchers.append((match_kwargs, exclusions, f))\n return f", "def _add_object(self, object_dict):\n # Attempt to map the object first. 
This will raise an\n # ItemExistsError if a named object of the same type already\n # exists.\n self._add_object_to_map(self.append_key, object_dict)\n\n # Add the object to the end of the model.\n # TODO: which objects need added to the beginning?\n self.model_dict[self.append_key] = object_dict\n\n # Update append key.\n self._update_append_key()", "def add_ignored_match(self, secret: dict) -> None:\n\n matches_ignore = [\n match[\"match\"] if isinstance(match, dict) else match\n for match in self.matches_ignore\n ]\n if secret[\"match\"] not in matches_ignore:\n self.matches_ignore.append(secret)\n else:\n for match in self.matches_ignore:\n if (\n isinstance(match, dict)\n and match[\"match\"] == secret[\"match\"]\n and match[\"name\"] == \"\"\n ):\n match.update({\"name\": secret[\"name\"]})", "def __add__(aMatchList, bMatchList):\n for id in bMatchList._matches.keys():\n aMatchList.addMatch(id, bMatchList._matches[id])\n return aMatchList", "def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False):\n matcher = re.compile(pattern)\n for line in iterable:\n match = matcher.match(line)\n if not match:\n if must_match:\n raise ConfigurationError(\"Cannot match key-value from '%s' with pattern '%s'. Must match is set to true\" % (line, pattern))\n else:\n continue\n key = match.group(1).strip()\n try:\n value = match.group(2).strip()\n value = json.loads(value) if len(value) > 0 else None\n if add_only_keys is None or key in add_only_keys:\n dictionary[key] = value\n logging.debug(\"Key-value item (%s=%s) has been parsed and added to dictionary\", key, str(value))\n except ValueError as err:\n if not ignore_errors:\n raise ConfigurationError(\"Cannot parse JSON string '%s' with key '%s' (key-value definition: '%s'). Error is %s\" % (value, key, line, str(err)))", "def _add_object_to_map(self, model_key, object_dict):\n # Grab reference to the object sub-dict.\n object_map = self.model_map['object']\n\n # Get type of object.\n obj_type = object_dict['object']\n\n # Define key object pair\n key_obj = [model_key, object_dict]\n\n # If this type isn't in the map, add it. NOTE: this can lead to\n # empty entries if the object isn't named.\n if obj_type not in object_map:\n object_map[obj_type] = {}\n\n try:\n # Never try to map an already existing named object.\n if object_dict['name'] in object_map[obj_type]:\n s = '{} already exists in the {} map!'\n raise ItemExistsError(s.format(object_dict['name'], obj_type))\n\n except KeyError:\n # Unnamed object. 
Add it to the unnamed list.\n self.model_map['object_unnamed'].append(key_obj)\n\n else:\n # Named object, map it.\n object_map[obj_type][object_dict['name']] = key_obj\n\n # No need to return; we're directly updating self.model_map", "def add_match_result(self, variable, match_result):\n if isinstance(match_result, MatchResult):\n self._match_result_dict[variable] = match_result\n else:\n raise ValueError(\"Input must be a valid TimeSeries object\")", "def _add_found_values(self, transform_dict, transform_key,\n lookup_key, lookup_dict):\n try:\n if self._verify_key_exists(lookup_key, lookup_dict):\n transform_dict[transform_key] = \\\n ''.join(nested_lookup(lookup_key, lookup_dict))\n except TypeError:\n pass\n if isinstance(lookup_key, list):\n transform_dict[transform_key] = \\\n ''.join(self._key_list_search(lookup_key, lookup_dict))\n return transform_dict", "def match_info(info_dict):\n return True", "def test_addEntryByDict(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n b = self.g.add_entry({'term': 'foo', 'tags': 'a', 'value': '1'})\n self.assertTrue(b)", "def add_rule_to_dict(rule_dict, lhs, rhs):\n if rhs not in rule_dict:\n rule_dict[rhs] = list()\n rule_dict[rhs].append(lhs) \n return rule_dict", "def findMatches3(personDict,matches,skepticalMatches,additionalMatches,personDict2):\n dictConsidered = personDict['ALIAS']\n for alias in dictConsidered:\n if alias == \"\":\n continue\n pairs = itertools.combinations(dictConsidered[alias],2)\n for p in pairs:\n k = tuple(sorted(p))\n if (k not in matches) and (k not in skepticalMatches) and (k not in additionalMatches):\n info1 = personDict['EnterpriseID'][p[0]]\n info2 = personDict['EnterpriseID'][p[1]]\n \n info1b = personDict2['EnterpriseID'][p[0]]\n info2b = personDict2['EnterpriseID'][p[1]]\n score = getScorePair(info1b,info2b)\n if score>=7:\n additionalMatches[k] = score\n\n return additionalMatches", "def test_updatewithdictionarybycomparingdictionaries(self):\n s1 = Square(10, 2, 1, 9)\n s1_dictionary = s1.to_dictionary()\n s2 = Square(1, 1)\n s2.update(**s1_dictionary)\n self.assertEqual(s1.__dict__, s2.__dict__)", "def matches(self, matches):\n\n self._matches = matches", "def matches(self, matches):\n\n self._matches = matches", "def test_addDict(self):\n lidi = []\n lidi.append({'term': 'foo', 'tags': 'a', 'value': '1'})\n lidi.append({'term': 'bar', 'tags': 'a, b', 'value': '2'})\n lidi.append({'term': 'gnark', 'tags': 'a, c', 'value': '3'})\n self.g.add_dict(lidi)", "def insert_match(self, gameid):\n if Match.query.filter(Match.gameid == gameid).first():\n self.logger.info(\"Match {} already exists in the DB\".format(gameid))\n return True\n match_json = self.rc.get_match(gameid)\n if not match_json:\n self.logger.warning(\"API did not return data for this gameid: {}\".format(gameid))\n return False\n match_json = self.lower_keys(match_json)\n # Get column names\n match_columns = Match.__table__.columns.keys()\n # Remove all k:v pairs that do not match column names\n to_del = []\n for k, v in match_json.items():\n if k not in match_columns:\n to_del.append(k)\n # del match_json[k]\n for k in to_del:\n del match_json[k]\n match = Match(**match_json)\n match.gamecreation = datetime.utcfromtimestamp(match.gamecreation // 1000)\n self.db.session.add(match)\n self.db.session.commit()\n return True", "def replace(self, matchobj):\n key = matchobj.group(1)\n if self.dict.has_key(key):\n return self.dict[key]\n else:\n return ''", "def addtwodimdict(self, thedict, key_a, key_b, val):\r\n if key_a in thedict:\r\n 
thedict[key_a].update({key_b: val})\r\n else:\r\n thedict.update({key_a: {key_b: val}})", "def add(self, obj: model.IdentifiableArtefact):\n for field, field_info in direct_fields(self.__class__).items():\n # NB for some reason mypy complains here, but not in __contains__(), below\n if isinstance(\n obj, get_args(field_info.outer_type_)[1], # type: ignore [attr-defined]\n ):\n getattr(self, field)[obj.id] = obj\n return\n raise TypeError(type(obj))", "def _match_all(self, obj, criteria):\n return all(getattr(obj, key_, None) == value_ for\n key_, value_ in criteria.items())", "def add_item(key, obj, dst):\n\n if key not in dst:\n dst[key] = []\n dst[key].append(obj)", "def matches_dict(self, answer_dict):\n\n return self.matches(Answer(\n answer_dict['group_id'],\n answer_dict['block_id'],\n answer_dict['answer_id'],\n \"\",\n answer_dict['group_instance'],\n answer_dict['answer_instance'],\n ))", "def match(dictionary, query, policy='relaxed', matches=None):\n if query is None:\n return True\n assert policy in ['relaxed', 'strict'], \"\"\n\n for field, value in query.iteritems():\n if field not in dictionary:\n if policy == 'relaxed':\n continue\n else:\n return False\n if isinstance(value, list) or not isinstance(value, basestring):\n values = value if isinstance(value, list) else [value]\n if dictionary[field] not in values:\n return False\n if matches is not None:\n matches['%s_0' % (field)] = dictionary[field]\n else:\n if value == '':\n # Take special care if value is an empty string\n if value != dictionary[field]:\n return False\n elif matches is not None:\n matches['%s_0' % (field)] = dictionary[field]\n continue\n else:\n match = re.compile(value).match(dictionary[field])\n if not match:\n return False\n else:\n if matches is not None:\n matches['%s_0' % (field)] = dictionary[field]\n for index, group in enumerate(match.groups()):\n matches['%s_%d' % (field, index+1)] = group\n continue\n return True", "def _match(self, document: dict, query: dict) -> bool:\n\n matches = [\n self._match(document.get(key), value)\n if isinstance(value, dict) and isinstance(document.get(key), dict)\n else document.get(key) == value\n for key, value in query.items()\n ]\n return all(matches)", "def append(self, obj):\r\n self.record_count += 1\r\n \r\n if type(obj) == dict:\r\n self._probe_record(obj)\r\n else:\r\n self._probe_row(obj)", "def test_dict(self, obj: dict) -> None:\r\n properties = read_properties(obj)\r\n for key, value in properties.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if read_type(value) == 'object':\r\n logger.debug('dict -> dict')\r\n self.test_dict(obj=value)\r\n elif read_type(value) == 'array':\r\n logger.debug('dict -> list')\r\n self.test_list(array=value)", "def add(enforcer_dict, key, value):\n enforcer_dict['d'] = 4\n assert other.keystring == 'abcd'\n assert other.valuesum == 10\n\n enforcer_dict.update(dict(e=5, f=6))\n assert other.keystring == 'abcdef'\n assert other.valuesum == 21", "def _add_match(self, backward_match_list):\n\n already_in = False\n\n for b_match in backward_match_list:\n for l_match in self.match_list:\n if b_match.match == l_match.match:\n index = self.match_list.index(l_match)\n self.match_list[index].qubit.append(b_match.qubit[0])\n already_in = True\n\n if not already_in:\n self.match_list.append(b_match)", "def match_source_key(self, match):\n raise NotImplementedError", "def validate_match(context):\n\n schema = isinstance(match_schema, ValidateViewHook) and match_schema(request) or match_schema\n\n state 
= Dummyobj()\n state.request = request\n state.context = context\n try:\n return schema.to_python(request.matchdict, state=state)\n except formencode.Invalid as exc:\n unpacked = exc.unpack_errors()\n request.set_property(lambda ctx: unpacked,\n invalid_match_attr, reify=True)\n if raise_exc is True:\n _raise(invalid_match_exc, unpacked)\n else:\n return {}", "def add_match(self, match_id, team1, team2, team_tag1, team_tag2, ip, team1_id, team2_id):\n\n query_select = sC.SELECT_MATCHES_WHERE_IP_.format(ip)\n match_id_ = self.util.match_with_ip_check(query_select)\n\n if match_id_:\n return match_id_\n\n query = sC.INSERT_MATCHES_VALUES_.format(int(match_id),\n team1[:pC.MAX_CHARACTERS_SQL], team2[:pC.MAX_CHARACTERS_SQL],\n team_tag1[:pC.MAX_CHARACTERS_SQL], team_tag2[:pC.MAX_CHARACTERS_SQL],\n ip, team1_id, team2_id, datetime.now())\n\n self.execute_query(query)", "def add_attributes_from_dict(self, dict):\n for key in dict:\n val = dict[key]\n if hasattr(self, key):\n setattr(self, key, val)", "def store_matches(\n self,\n parameter_handler: ParameterHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n # TODO: Should the key_map always be reset, or should we be able to partially\n # update it? Also Note the duplicated code in the child classes\n self.key_map: dict[BondKey, PotentialKey] = dict() # type: ignore[assignment]\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n param = val.parameter_type\n if param.k_bondorder or param.length_bondorder:\n bond = topology.get_bond_between(*key)\n fractional_bond_order = bond.fractional_bond_order\n if not fractional_bond_order:\n assert self._get_uses_interpolation(parameter_handler)\n raise RuntimeError(\n \"Bond orders should already be assigned at this point\",\n )\n else:\n fractional_bond_order = None\n topology_key = BondKey(\n atom_indices=key,\n bond_order=fractional_bond_order,\n )\n\n potential_key = PotentialKey(\n id=val.parameter_type.smirks,\n associated_handler=parameter_handler.TAGNAME,\n bond_order=fractional_bond_order,\n )\n self.key_map[topology_key] = potential_key\n\n valence_terms = self.valence_terms(topology)\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n topology=topology,\n assigned_terms=matches,\n valence_terms=valence_terms,\n )", "def test_add_nested_dict_value():\n\n recursiveKeys = [\n \"reward_signals.extrinsic.strength\",\n \"reward_signals.extrinsic.gamma\",\n \"reward_signals.curiosity.strength\",\n \"reward_signals.curiosity.gamma\",\n ]\n\n expectedDict = {\n 'reward_signals': {\n 'curiosity': {'gamma': 1.0, 'strength': 1.0},\n 'extrinsic': {'gamma': 1.0, 'strength': 1.0},\n }\n }\n\n dictionary = {}\n\n for key in recursiveKeys:\n common.add_nested_dict_value(dictionary, key, 1.0)\n\n assert dictionary == expectedDict\n\n dictionary = {'reward_signals': {'extrinsic': {}}}\n\n for key in recursiveKeys:\n common.add_nested_dict_value(dictionary, key, 1.0)\n\n assert dictionary == expectedDict\n\n dictionary = {'reward_signals': {'extrinsic': {'gamma': 0.99}}}\n\n for key in recursiveKeys:\n common.add_nested_dict_value(dictionary, key, 1.0)\n\n assert dictionary == expectedDict", "def addMatchEntry(tmpFD, match, mtype, val):\n tmpFD.write(f\" {match} {mtype} {val}\\n\")", "def _add_data_to_response(self, desc_matches, url, page_matches, response):\n\n if desc_matches:\n response.append({\n 'url': url,\n 'matches': desc_matches\n })\n\n if page_matches:\n response.append({\n 'url': url,\n 'matches': page_matches\n })", "def 
match_user(dictionary: 'Climatematch', \n match: 'NGOMatch') -> Dict[str, tuple(int,int)]:\n\n total = {}\n \n for user in dictionary:\n total[user] = {}\n skillsCount = 0\n interestCount = 0\n for sk in dictionary[user]['skills']: \n if sk in match['skills']['technical']:\n skillsCount = skillsCount + 1\n elif sk in match['skills']['interpersonal']:\n skillsCount = skillsCount + 1\n \n for inte in dictionary[user]['interest']: \n if inte in match['interest']:\n interestCount = interestCount + 1\n \n total[user][(skillsCount, interestCount)]\n return total", "def _merge_by_query(self, obj_dict):\n _res = self.__session.query(obj_dict[\"class\"]).filter_by(**obj_dict[\"query_dict\"]).first()\n\n if _res is None:\n self._add(obj_dict[\"instance\"])\n else:\n if hasattr(obj_dict[\"instance\"], 'attributes') and \\\n hasattr(obj_dict[\"instance\"], 'p_key'):\n for attr in obj_dict[\"instance\"].attributes:\n if attr not in obj_dict[\"instance\"].p_key:\n setattr(_res, attr, getattr(obj_dict[\"instance\"], attr))\n # updating the instance\n obj_dict[\"instance\"] = _res\n else:\n raise AttributeError(\"Class variable (attributes / p_key) not set for %s\" %\n (obj_dict[\"instance\"],))", "def match(self, obj):\n\n return self._match(self.rule, obj)", "def add_dict(self, d, record_cls=None):\n rc = record_cls or self._ELE_CLS\n self.add_container(self.from_dict(d, record_cls=rc))\n return len(self)", "def add_value(self, thing_key, dkey, value):\n if thing_key in self.things_dict:\n dic = self.things_dict[thing_key]\n if type(dic) != type({}):\n dic = {}\n dic[dkey] = value\n self.things_dict[thing_key] = dic\n self.changed.append(thing_key)\n return True\n return False", "def _merge_obj(result, obj, pointer=''): # changed code\n if not isinstance(result, dict):\n result = {}\n\n if not isinstance(obj, dict):\n return obj\n\n for key, value in obj.items():\n if isinstance(value, dict):\n target = result.get(key)\n if isinstance(target, dict):\n _merge_obj(target, value, pointer=f'{pointer}/{key}') # changed code\n continue\n result[key] = {}\n _merge_obj(result[key], value, pointer=f'{pointer}/{key}') # changed code\n continue\n\n # new code\n if key in result:\n pointer_and_key = f'{pointer}/{key}'\n # Exceptions.\n if (value is None and pointer_and_key == '/definitions/Milestone/properties/documents/deprecated' and\n repo_name in ('ocds_milestone_documents_extension', 'public-private-partnerships')):\n warnings.warn(f're-adds {pointer}')\n elif (value == [] and pointer_and_key == '/required' and\n repo_name == 'ocds_pagination_extension'):\n warnings.warn(f'empties {pointer_and_key}')\n else:\n if is_profile:\n message = ' - check for repeats across extension_versions.json, dependencies, testDependencies'\n else:\n message = ''\n raise Exception(f'unexpectedly overwrites {pointer_and_key}{message}')\n\n if value is None:\n result.pop(key, None)\n continue\n result[key] = value\n return result", "def add(self, key, obj):\n with self._lock:\n slot = self._dict.get(key, None)\n if slot is None:\n slot = [obj, 0]\n else:\n slot[1] += 1\n self._dict[key] = slot", "async def add_dict(self, dic):\n for key in dic:\n await self.set(key, dic[key])", "def matchRecordAttrs(mapobj, attrs):\n for k,v in iteritems(attrs):\n try: val = getattr(mapobj, k)\n except AttributeError: # k isn't an attr of record\n if v: return False # if k doesn't exist in mapobj but was required, no match\n else: continue # otherwise ignore attributes that aren't defined for the given map record\n if val != v: return False # if 
any criteria matches, it's considered a match\n return True # all criteria matched at all", "def _add_dict_values(self, d1, d2):\n\n if d1 is None and d2 is None:\n return None\n\n d1 = d1 or {}\n d2 = d2 or {}\n\n added = {}\n for key in set(list(d1.keys()) + list(d2.keys())):\n added[key] = dict(d1.get(key, {}), **(d2.get(key, {})))\n return added", "def add_item(dct,item):\r\n if item not in dct[0]:\r\n print \"itemNotFound \",str(item)\r\n return False\r\n\r\n num=len(item)\r\n if num in dct:\r\n if item in dct[num]:\r\n return False\r\n else:\r\n dct[num].append(item)\r\n return True\r\n else:\r\n dct[num]=[item]\r\n return True", "def _add_dictionary(self, current, added):\n for key in added:\n if key in current and isinstance(current[key], collections.Mapping):\n self._add_dictionary(current[key], added[key])\n else:\n current[key] = added[key]", "def validate_present(self, obj):\n for k, v in obj.items():\n func = self.validation.get(k)\n if func:\n func(k, v)", "def add(self, obj: object) -> None:\n self._contains.append(obj)", "def test_merge_aggregate_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def match_fill(self,MatchMake_inst):\n if type(MatchMake_inst) is not MatchMake:\n raise TypeError(\"Wrong datatype: MatchMake_inst has to be of custom type MatchMake!\")\n\n res_dict = {}\n for match in MatchMake_inst._score_list:\n try:\n res_dict[match.match_score].append(match.match_target)\n \n except KeyError:\n res_dict[match.match_score] = [match.match_target]\n \n\n self.add(MatchMake_inst._source) # add source in network\n parent = [MatchMake_inst._source]\n for score in sorted(res_dict.keys(),reverse=True):\n for target in res_dict[score]:\n self.add(target,parent,score)\n parent = res_dict[score]", "def add_dict(dest, src):\n for key in src.keys():\n if key in dest.keys():\n dest[key] += src[key]\n else:\n dest[key] = src[key]", "def from_re_match(cls, match):\n kwargs = match.groupdict()\n player_location = kwargs['player_location'].split()\n kwargs['player_location'] = (int(player_location[0]),\n int(player_location[1]),\n int(player_location[2]))\n target_location = kwargs['target_location'].split()\n kwargs['target_location'] = (int(target_location[0]),\n int(target_location[1]),\n int(target_location[2]))\n if match.string.endswith('(headshot)'):\n kwargs['headshot'] = True\n return cls(**kwargs)", "def add(self, key, value):", "def find(found_item, hash_table_cell):\n if found_item:\n found_item[1] = obj\n else:\n hash_table_cell.append([key, obj])\n self.size += 1\n self._keys.append(key)", "def save(self, **kwargs):\n for k, v in kwargs.items():\n self._validated_data[k] = v\n\n # Create or update direct relations (foreign key, one-to-one)\n related_objects = self._extract_reverse_relations(kwargs)\n self._save_direct_relations(kwargs)\n\n # TODO: move to a specialized class (easier to subclass)\n try:\n match_on = {}\n for field_name, field in self.get_fields().items():\n if self.match_on == '__all__' or field_name in self.match_on:\n match_on[field.source or field_name] = self._validated_data.get(field_name)\n # a parent serializer may inject a value that isn't among the fields, but is in `match_on`\n for key in self.match_on:\n if key not in self.get_fields().keys():\n match_on[key] = self._validated_data.get(key)\n match = self.queryset.get(**match_on)\n for k, v in self._validated_data.items():\n setattr(match, k, v)\n 
except ObjectDoesNotExist:\n match = self.queryset.model(**self._validated_data)\n except (TypeError, ValueError):\n self.fail('incorrect_type', data_type=type(self._validated_data).__name__)\n match.save()\n\n self._save_reverse_relations(related_objects, instance=match)\n return match", "def test_deep_set_create(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.set_dict_key_value(mdict, \"K:L:M\", \"Q\")\n self.assertEqual(\n {\n \"A\": \"B\",\n \"C\": {\"D\": \"E\", \"F\": {\"G\": \"H\", \"I\": \"J\"}},\n \"K\": {\"L\": {\"M\": \"Q\"}},\n },\n res,\n )", "def post(self):\n match_data = self.get_from_body('match')\n try:\n guest_id = match_data['guest']\n guest_points = match_data['guestPoints']\n host_id = match_data['host']\n host_points = match_data['hostPoints']\n if host_id is guest_id:\n self.write_signed_error(400, 'Host and guest id cannot be equal')\n match = Match(guest=ndb.Key(User, guest_id),\n guest_points=guest_points,\n host=ndb.Key(User, host_id),\n host_points=host_points)\n # Asynchronously update player data\n guest_future = User.get_by_id_async(guest_id)\n host_future = User.get_by_id_async(host_id)\n guest = guest_future.get_result()\n host = host_future.get_result()\n guest.experience += guest_points\n host.experience += host_points\n # Update entities\n results = _update_entities(match, host, guest)\n # Prepare message\n match_key = results[0].urlsafe()\n self.write_signed_message(201, 'id', match_key)\n except (KeyError, TypeError):\n self.write_signed_error(400, 'Missing attributes for match')\n except TransactionFailedError:\n self.write_signed_error(507, 'Unable to store match')\n except AttributeError:\n self.write_signed_error(400, 'Invalid id')\n except BadValueError:\n # Thrown when model validations fail\n self.write_signed_error(400, 'Invalid data')", "def test_append_to_results_in(self):\n # pre conditions\n field = 'foo'\n value = 'bar'\n existing = ['baz']\n existing.append(value)\n values_dict = {field: existing}\n\n self.assertTrue(value in values_dict.get(field), \"pre-condition failed\")\n\n # test\n result = gen.append_to_results(field, value, values_dict, unique=True)\n\n # post conditions\n expected = ['baz', 'bar']\n self.assertEqual(result, expected)", "def test_apply_scalar_map(self):\n super(TestObjDict, self).test_apply_scalar_map(_as_obj=True)", "def check_dict(dic, validator, messages):\n check_dict_alg(dic, validator, [], messages, validator, \"NoObject\")", "def build_match_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = string\r\n answer['match'] = tmp\r\n return answer", "def add_to_dict_fail( self, key_0, key_1, list_2 ):\n dict_1_new = { key_1: list_2 }\n\n\n dict_1_current = self._dd_dict.get( key_1 )\n if dict_1_current is None:\n #dict_1_new = dict_1_new\n pass\n\n else:\n dict_1_new = {**dict_1_current, **dict_1_new }\n\n\n dict_0_new = { key_0: dict_1_new } # maybe a merge ??\n\n dict_0_current = self._dd_dict.get( key_0 )\n\n if dict_0_current is None:\n dict_0_new = dict_0_new\n\n else:\n dict_0_new = {**dict_0_current, **dict_0_new }\n\n dict_0_new = {**dict_0_current, **dict_0_new }\n\n self._dd_dict = { **self._dd_dict, **dict_0_new }\n\n print( self._dd_dict )\n\n return self._dd_dict", "def func(match, create_new=False, *args, **kwargs):\n\n data = match.group(0)\n\n if create_new:\n dash = data.replace(\"_\", \"-\") if \"_\" in data else data\n\n if dash not in objects:\n new = gen_guid()\n objects[dash] = new\n objects[dash.replace(\"-\", \"_\")] = new.replace(\"-\", \"_\")\n\n if data 
in objects:\n return (objects[data], True)\n\n return (data, False)", "def merge_dict(target, addition):\n for key in addition:\n if key in target and isinstance(target[key], dict) \\\n and isinstance(addition[key], dict):\n merge_dict(target[key], addition[key])\n else:\n target[key] = addition[key]", "def __iadd__(self, other):\n if not isinstance(other, dict):\n msg = 'Can not concatenate Dict and {}'.format(type(other))\n raise TypeError(msg)\n for key, val in other.items():\n if key in self:\n self._append_key(key, val)\n else:\n self[key] = val\n return self", "def test_adduser_entry(user_tuple, wordlist, input_dict, output_dict):\n\n assert set(adduser_entry(user_tuple, wordlist, input_dict)[user_tuple]) == set(output_dict[user_tuple])\n for key in output_dict:\n assert type(key) == tuple\n assert type(output_dict[user_tuple]) == list", "def add_to_dict(from_dict, to_dict):\n for k, v in list(from_dict.items()):\n if hasattr(v, 'copy') and callable(getattr(v, 'copy')):\n to_dict[k] = v.copy()\n else:\n to_dict[k] = v", "def store_matches(\n self,\n parameter_handler: ParameterHandler,\n topology: \"Topology\",\n ) -> None:\n if self.key_map:\n # TODO: Should the key_map always be reset, or should we be able to partially\n # update it? Also Note the duplicated code in the child classes\n self.key_map: dict[\n Union[TopologyKey, LibraryChargeTopologyKey],\n PotentialKey,\n ] = dict()\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n topology_key = TopologyKey(atom_indices=key)\n potential_key = PotentialKey(\n id=val.parameter_type.smirks,\n associated_handler=parameter_handler.TAGNAME,\n )\n self.key_map[topology_key] = potential_key\n\n if self.__class__.__name__ in [\n \"SMIRNOFFBondCollection\",\n \"SMIRNOFFAngleCollection\",\n ]:\n valence_terms = self.valence_terms(topology)\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n assigned_terms=matches,\n topology=topology,\n valence_terms=valence_terms,\n )", "def match_id(self, match_id):\n\n self._match_id = match_id", "def add(obj):", "def create_or_update_match_settings(\n request: MatchSettingsUpdateRequest,\n ) -> MatchSettingsUpdateResponse:\n if config := AdditionalMatchSettingsConfig.get(request.privacy_group_id):\n config.pdq_match_threshold = request.pdq_match_threshold\n hmaconfig.update_config(config)\n\n return MatchSettingsUpdateResponse(\n f\"match_settings updated for pg_id {request.privacy_group_id} with pdq_match_threshold={request.pdq_match_threshold}\"\n )\n\n config = AdditionalMatchSettingsConfig(\n request.privacy_group_id, request.pdq_match_threshold\n )\n hmaconfig.create_config(config)\n return MatchSettingsUpdateResponse(\n f\"match_settings created for pg_id {request.privacy_group_id} with pdq_match_threshold={request.pdq_match_threshold}\"\n )", "def _match(self, rule, obj):\n\n for key in rule:\n if key == '$and':\n if not self.handle_and(key, rule[key], obj):\n return False\n\n elif key == '$or':\n if not self.handle_or(key, rule[key], obj):\n return False\n\n elif key == '$nor':\n if not self.handle_nor(key, rule[key], obj):\n return False\n\n elif not self.handle_field(key, rule[key], obj):\n return False\n\n return True", "def _find_match(needle: dict, haystack: list, keys: list):\n for item in haystack:\n for key in keys:\n if item.get(key) != needle[key]:\n break\n else:\n return item\n return None", "def calculate_match_at_backend():\n\n # Calculate lovermatch list for each user\n user_set = UserInfo.objects\n for u1 in user_set:\n 
matchlist = {}\n for u2 in user_set:\n if u1 != u2:\n features_to_match = u1.features\n weights = u1.percentage\n sim = get_similarity(u1, u2, features_to_match, weights)\n matchlist[u2.name] = sim\n u1.loverMatch = matchlist\n u1.save()\n\n # Calculate lovermatched list for each user\n user_set = UserInfo.objects\n for u1 in user_set:\n matchedlist = {}\n for u2 in user_set:\n if u1 != u2:\n if u1.name in u2.loverMatch.keys():\n matchedlist[u2.name] = u2.loverMatch[u1.name]\n u1.loverMatched = matchedlist\n u1.save()", "def store_matches(\n self,\n parameter_handler: ImproperTorsionHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n self.key_map = dict()\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n parameter_handler._assert_correct_connectivity(\n val,\n [\n (0, 1),\n (1, 2),\n (1, 3),\n ],\n )\n n_terms = len(val.parameter_type.k)\n for n in range(n_terms):\n smirks = val.parameter_type.smirks\n non_central_indices = [key[0], key[2], key[3]]\n\n for permuted_key in [\n (\n non_central_indices[i],\n non_central_indices[j],\n non_central_indices[k],\n )\n for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]\n ]:\n topology_key = ImproperTorsionKey(\n atom_indices=(key[1], *permuted_key),\n mult=n,\n )\n potential_key = PotentialKey(\n id=smirks,\n mult=n,\n associated_handler=\"ImproperTorsions\",\n )\n self.key_map[topology_key] = potential_key", "def add_object(self, object_to_be_added):\n new_mapping = Map.add_object(self.id, object_to_be_added)\n if new_mapping:\n object_to_be_added.save()\n new_mapping.ref_id = object_to_be_added.id\n return True\n else:\n return False", "def test_merge_overwrite_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def from_dict(self,\n d: dict,\n iter_if: FunctionType = None,\n iter_by: FunctionType = None,\n ) -> NoReturn:\n for k, v in d.items():\n if isinstance(iter_if, FunctionType) and not iter_if(k, v):\n continue\n if iter_by:\n v = iter_by(k, v)\n if k not in dir(self): # TODO is it strict?\n # warn develop\n print(f\"warning: a key({k}) not in the document model is inserting into mongodb.\")\n setattr(self, k, v)", "def traverse_dict_and_add(self, rootDir, dictH):\n origRootDir = rootDir\n for key, item in dictH.iteritems():\n if item is None or item == {} or item == []:\n attemptedJoin = os.path.normpath(os.path.join(rootDir, key))\n keyPath = None\n if not os.path.isabs(key) and (os.path.isdir(attemptedJoin) or\n os.path.isfile(attemptedJoin)):\n # copy the found file/folder to directory\n keyPath = attemptedJoin\n if os.path.isabs(key) and (os.path.isfile(key) or\n os.path.isdir(key)):\n # copy file/folder to the root location\n if not os.path.isdir(rootDir):\n paths.mkdir_p(rootDir)\n keyPath = paths.path_leaf(key)\n copyLoc = os.path.join(rootDir, keyPath)\n shutil.copy2(key, copyLoc)\n continue # skip the rest of this iteration\n\n if keyPath is not None and not os.path.isdir(keyPath):\n # the string was either not a file/folder or couldn't be\n # resolved from a relative path into a file/folder\n #\n copyLoc = paths.path_leaf(keyPath)\n copyLoc = os.path.join(rootDir, copyLoc)\n print copyLoc\n shutil.copy2(key, copyLoc)\n elif keyPath is None:\n # no directory exists at this location, create one\n dirToMake = os.path.normpath(os.path.join(rootDir, key))\n os.makedirs(dirToMake)\n # sys.exit('Got: \"{f}\", couldn\\'t resolve '\n # 'into file or 
folder'.format(f=key))\n\n elif isinstance(item, dict):\n newRootDir = os.path.join(rootDir, key)\n newRootDir = os.path.normpath(newRootDir)\n self.traverse_dict_and_add(rootDir=newRootDir,\n dictH=dictH[key])\n else:\n sys.exit('Got: \"{f}\", expected a dictionary, '\n '\\{\\} or None'.format(f=item))", "def test_deep_append(self):\n sdict = {\"bar\": {\"baz\": [1, 2]}}\n res = dictupdate.append_dict_key_value(sdict, \"bar:baz\", 42)\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42]}}, res)\n # Append with alternate delimiter\n res = dictupdate.append_dict_key_value(sdict, \"bar~baz\", 43, delimiter=\"~\")\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42, 43]}}, res)\n # Append to a not-yet existing list\n res = dictupdate.append_dict_key_value({}, \"foo:bar:baz\", 42)\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": [42]}}}, res)", "def _match(self, check):\n matches = []\n tests = {}\n for k, v in check.items():\n if isinstance(v, dict):\n tests[k] = CompositeFilter(v)\n else:\n tests[k] = lambda o: _add_tz(o) == _add_tz(v)\n\n for rec in self._records.values():\n if self._match_one(rec, tests):\n matches.append(deepcopy(rec))\n return matches", "def handleMatch(self, m):\r\n pass", "def add_dict_entry(dictionary: dict, key: Any, value: Any) -> None:\n try:\n dictionary[key].append(value)\n except KeyError:\n dictionary[key] = [value]", "def apply_scalar_map(self, *args, **kwargs):\n kwargs['_as_obj'] = True\n return super(ObjDict, self).apply_scalar_map(*args, **kwargs)", "def update_dict(new,old):", "def match(self, item):", "def find(self, a, b):\n\t\tfor i in b:\n\t\t\tif 'ipv4' in i and 'ipv4' in a:\n\t\t\t\tif i['ipv4'] == a['ipv4']:\n\t\t\t\t\ti.update(a)\n\t\t\t\t\treturn\n\t\t\telif 'ipv6' in i and 'ipv6' in a:\n\t\t\t\tif i['ipv6'] == a['ipv6']:\n\t\t\t\t\ti.update(a)\n\t\t\t\t\treturn\n\t\t\telif 'hostname' in i and 'hostname' in a:\n\t\t\t\tif i['hostname'] == a['hostname']:\n\t\t\t\t\ti.update(a)\n\t\t\t\t\treturn\n\t\tb.append(a)\n\t\treturn" ]
[ "0.72251153", "0.6223135", "0.612522", "0.6043846", "0.6039093", "0.5865672", "0.5826964", "0.5673978", "0.5657533", "0.56395197", "0.5595575", "0.55939347", "0.5582442", "0.55700904", "0.55490994", "0.54979956", "0.5410391", "0.5368418", "0.536791", "0.535772", "0.5288205", "0.5236559", "0.52085674", "0.5194137", "0.5194137", "0.51784515", "0.5150603", "0.51416355", "0.5136652", "0.51329386", "0.51117957", "0.51086485", "0.5105162", "0.510268", "0.5099662", "0.5097704", "0.50716406", "0.5065202", "0.5056278", "0.50531244", "0.50442034", "0.50409013", "0.50400573", "0.50330704", "0.50313747", "0.5022077", "0.49991575", "0.49902794", "0.4989795", "0.49827197", "0.49775675", "0.4976321", "0.4961318", "0.49597058", "0.4955217", "0.49515486", "0.49506053", "0.49405125", "0.4911506", "0.4906947", "0.49038094", "0.4892627", "0.48887813", "0.48864618", "0.48768723", "0.4875549", "0.48695296", "0.48669994", "0.486568", "0.48595595", "0.48559168", "0.48544765", "0.48503304", "0.4849082", "0.4839271", "0.4839074", "0.48311737", "0.48275658", "0.48261672", "0.4813723", "0.4808131", "0.48005787", "0.47974283", "0.4796537", "0.47934455", "0.47928852", "0.47928467", "0.47869116", "0.47856015", "0.47749624", "0.47642645", "0.4747214", "0.47470838", "0.4745575", "0.4741018", "0.4740947", "0.47349185", "0.4734899", "0.47285774", "0.47275716" ]
0.84316427
0
Find a match using match_dict. Returns None if there is no match. Checks to make sure match_dict keys are valid.
def match(self, match_dict):
    for match_key in match_dict.keys():
        assert match_key in self.keys_to_track
    for key_to_track in self.keys_to_track:
        if match_dict.has_key(key_to_track):
            match_val = match_dict[key_to_track]
            if self.tracker[key_to_track].has_key(match_val):
                return self.tracker[key_to_track][match_val]
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find_match(needle: dict, haystack: list, keys: list):\n for item in haystack:\n for key in keys:\n if item.get(key) != needle[key]:\n break\n else:\n return item\n return None", "def dict_match(d, key, default=None):\n\n if key in d and \"[\" not in key:\n return d[key]\n else:\n for pattern, value in iteritems(d):\n if fnmatchcase(key, pattern):\n return value\n return default", "def find_match(name, dictionary):\n if name == '':\n # raise \"Didn't find name\"\n return False\n search_name = (' ').join(name.split(' ')[:-1])\n if search_name in dictionary:\n return search_name\n else:\n return find_match(search_name, dictionary)", "async def get_match_from_id(match_id: int) -> Match or None:\n if match_id is None:\n return None\n\n if match_id in match_library:\n return match_library[match_id]\n\n raw_data = await matchdb.get_raw_match_data(match_id)\n if raw_data is not None:\n return await make_match_from_raw_db_data(raw_data)\n else:\n return None", "def find(self, location, dictionary=None, key_index=1):\n\n # dictionary is self object if no dictionary is provided\n if not dictionary:\n dictionary = self\n\n # take first value field to be found\n value = normalize_name(location[key_index])\n\n # extract matched value from\n value_matched = process.extractOne(value, dictionary.keys())\n\n if value_matched and value_matched[1] > self.THRESHOLD_RATIO:\n key = value_matched[0]\n\n # if there are more values to evaluate, call recursively\n if len(location) > key_index + 1:\n # print value_matched[1],\n return self.find(location, dictionary[key], key_index + 1)\n\n else:\n # print value_matched[1],\n return dictionary[key]\n\n else:\n return None", "def search(d, key, default=None):\n stack = [iter(d.items())]\n while stack:\n for k, v in stack[-1]:\n if isinstance(v, dict):\n stack.append(iter(v.items()))\n break\n elif k == key:\n return v\n else:\n stack.pop()\n return default", "def get_match_result(self, variable):\n try:\n return self._match_result_dict[variable]\n except KeyError:\n return None", "def find(name, *dicts):\n for d in dicts:\n if type(d) == str:\n return d\n elif name in d and d[name] is not None:\n return d[name]\n\n return None", "def find_in_dict(obj, key):\n if key in obj:\n return obj[key]\n for k, v in obj.items():\n if isinstance(v,dict):\n item = find_in_dict(v, key)\n if item is not None:\n return item", "def _get_value(match_entry: Dict, path0: str) -> any:\n if path0 is None:\n current_el = match_entry\n else:\n path = path0.split('/')\n current_el = match_entry\n for p in path:\n if current_el is None:\n break\n current_el = current_el.get(p)\n return current_el", "def find(self, entry_id: str, match: callable = operator.contains) -> Any:\n try:\n return next(self.find_all(entry_id, match))\n except StopIteration as error:\n raise errors.NoEntryFound(f'No entries found which match {entry_id}') from error", "def find_value(dic, key):\n return dic[key]", "def search_value(d, key, default=None):\n stack = [iter(d.items())]\n while stack:\n for k, v in stack[-1]:\n if isinstance(v, dict):\n stack.append(iter(v.items()))\n break\n elif k == key:\n return v\n else:\n stack.pop()\n return default", "def get_match_hash_key(hashkey, table):\n if hashkey in table:\n # assume that this has at least one flow entry\n b = convert_ip_to_int(table[hashkey][0].nw_src)\n a = convert_ip_to_int(table[hashkey][0].nw_dst)\n match_hash = (a * a + a + b) if a >= b else (a + b * b)\n if match_hash in table:\n return match_hash\n else:\n return None\n else:\n return None", "def 
dict_find_name(some_dict: Dict[str, Referent], path: List[str]) -> Result:\n if path:\n head, *tail = path\n try:\n return NameContainer.dict_find_name(\n cast(Dict[str, Referent], some_dict[head]),\n tail)\n except KeyError:\n NameContainer.logger.debug(f\"{head!r} not found in {some_dict.keys()}\")\n raise NameContainer.NotFound(path)\n else:\n return cast(Result, some_dict)", "def _parse_line(line):\n\n for key, rx in rx_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n # if there are no matches\n return None, None", "def _parse_line(line):\n\n for key, rx in rx_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n # if there are no matches\n return None, None", "def _search_list_of_dictionaries(key, value, list_of_dictionaries):\n\n for element in list_of_dictionaries:\n if element.get(key) == value:\n return element\n return None", "def findMatcher(self, ch):\n for m in self.matchers:\n if m.match(ch):\n return m\n return None", "def match(id=0):\n match = Match.query.get(id)\n if match is not None:\n return render_template('match.html', match=Match.query.get(id))\n abort(404)", "def _find_by_key(self, key, find):\n index = hashId(key, self.capacity) # Get the index/ bucket based on hash code of the key\n \n hash_table_cell = self._entry[index]\n found_item = None\n for item in hash_table_cell: #Iterrate the entry array and check the key is matching and if key is same than get the value\n if item[0] == key:\n found_item = item\n break\n\n return find(found_item, hash_table_cell)", "def lookup(my_dict, my_key, default_value=None):\n if my_key in my_dict:\n return my_dict[my_key]\n else:\n return default_value", "def find(self, answer):\n self._validate(answer)\n\n for index, existing in enumerate(self.answers):\n if answer.matches_dict(existing):\n return index\n\n return None", "def parse_line(keyword_dict, line):\n for key, rx in keyword_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n\n return None, None", "def get_match_by_id(self, match_id):\n request = rq.get(\n 'https://{region}.api.pvp.net/api/lol/{region}/v{version}/match/{id}?api_key={api_key}'.format(\n region=self.region,\n version=api_version['match'],\n id=match_id,\n api_key=API_KEY\n )\n )\n print(self.region, request)\n check_response(request)\n return request.json()", "def findKey(value,dict):\n for key, val in dict.iteritems():\n if value in val:\n return key", "def dict_search_recursive(d, k):\n # FIXME: make it generic recursive search over nested graphs and move to smp_base\n\n # print \"#\" * 80\n # print \"searching k = %s \" % (k,),\n if k in d:\n # print \"found k = %s, params = %s\" % (k, d[k]['params'].keys())\n return d[k]\n else:\n # print \"d.keys()\", d.keys()\n for k_, v_ in list(d.items()):\n # if v_[\n if 'graph' in v_['params']: # or v_['params'].has_key('subgraph'):\n # print \"k_\", k_, \"v_\", v_['params'].keys()\n return dict_search_recursive(v_['params']['graph'], k)\n # None found\n return None", "def find_item(hash_table_ref, key_sig):\n if not hasattr(key_sig, '__iter___'):\n key_sig = [key_sig]\n\n last_item = key_sig[len(key_sig) - 1]\n for key in key_sig:\n if key != last_item:\n if hasattr(hash_table_ref, 'keys') and key in hash_table_ref:\n hash_table_ref = hash_table_ref[key]\n else:\n # Item not found\n return None\n else:\n if hasattr(hash_table_ref, 'keys') and \\\n key in hash_table_ref:\n return hash_table_ref[key]\n # Item not found\n return None", "def findItem(obj, key):\n if key in obj:\n return obj[key]\n if 
type(obj) == str:\n return None\n for k, v in obj.items():\n if isinstance(v, dict):\n item = findItem(v, key)\n if item is not None:\n return item\n elif isinstance(v, list):\n for list_item in v:\n item = findItem(list_item, key)\n if item is not None:\n return item", "def find_one_bykey(cls, keydict, defaultval = None):\n return cls.dbm().modelclass_find_one_bykey(cls, keydict, defaultval)", "def lod_find(list_of_dict, key, val, return_val_if_false = False):\n for d in list_of_dict:\n if isinstance(d, dict) and d[key] == val: return d\n return return_val_if_false", "def get(self, match_id):\n try:\n match_key = ndb.Key(urlsafe=match_id)\n match = match_key.get()\n json_result = {'id': match_id,\n 'guest': match.guest.id(),\n 'guestPoints': match.guest_points,\n 'host': match.host.id(),\n 'hostPoints': match.host_points,\n 'timestamp': match.timestamp.isoformat()}\n self.write_signed_message(200, 'match', json_result)\n except (AttributeError, BadRequestError, ProtocolBufferDecodeError):\n self.write_signed_error(400, 'Invalid match id')", "def depth_first_search(self, target: Dict) -> Optional[Node]:\n\n def search(current_node: Node):\n flag = True\n for k, v in target.items():\n flag = flag and getattr(current_node, k) == v\n if not flag:\n break\n if flag:\n return current_node\n for child in current_node.children:\n ret = search(child)\n if ret:\n return ret\n return search(self.root_node)", "def dict_match(left, right, res=None):\n if res is None:\n res = [True, ]\n if res[0] == False:\n return False\n for k in right.keys():\n if (k in left):\n if (isinstance(left[k], dict) and isinstance(right[k], dict)):\n dict_match(left[k], right[k], res=res)\n else:\n res[0] = res[0] and left[k] == right[k]\n if res[0] == False:\n break\n return res[0]", "def resolve(self, key: str) -> Optional[Any]:\n return self.dict.get(key)", "def _find_closest_key(dictionary, target_key):\n out_key = None\n for key, value in dictionary.items():\n if target_key in key:\n out_key = key\n else:\n for key1, value1 in value.items():\n if target_key in key1:\n out_key = key1\n if out_key is None:\n raise Exception('Could not find a key in the dict which resembles \"{}\" which is needed for plotting'.format(target_key))\n\n return out_key", "def find_value_for_nested_key(mapping, key_of_interest, tree=[]):\n original_mapping = mapping\n logger.debug(\"Looking for key %s\", key_of_interest)\n logging.debug(\"Looking in %s\", mapping)\n logger.debug(\"Using tree %s\", tree)\n if tree:\n for leaf in tree:\n mapping = mapping[leaf]\n else:\n tree = [None]\n for leaf in reversed(tree):\n logging.debug(\"Looking in bottommost leaf %s\", leaf)\n for key, value in six.iteritems(mapping):\n if key == key_of_interest:\n return value\n if leaf:\n find_value_in_nested_key(original_mapping, key_of_interest, tree[:-1])\n warnings.warn(\"Couldn't find value for key %s\" % key_of_interest)\n # raise KeyError(\"Couldn't find value for key %s\", key_of_interest)", "def dict_item(dictionary, key):\n try:\n return dictionary.get(key, None)\n except AttributeError:\n # fail silently if something other than a dict is passed\n return None", "def match_source_key(self, match):\n raise NotImplementedError", "def search(self, key):\n \"\"\"\n\n if key in song_dict:\n # for obj in song_dict[dict_key]:\n # print(obj.song_name)\n return True\n else:\n # print(\"No such key exists in dictionary\")\n return False\n \"\"\"\n #dict.get(\"Katt\", None)\n return self.dictionary.get(key)", "def validate_match(context):\n\n schema = 
isinstance(match_schema, ValidateViewHook) and match_schema(request) or match_schema\n\n state = Dummyobj()\n state.request = request\n state.context = context\n try:\n return schema.to_python(request.matchdict, state=state)\n except formencode.Invalid as exc:\n unpacked = exc.unpack_errors()\n request.set_property(lambda ctx: unpacked,\n invalid_match_attr, reify=True)\n if raise_exc is True:\n _raise(invalid_match_exc, unpacked)\n else:\n return {}", "def lookup(self, key):\n n = self.find(key)\n if n:\n return n.value\n else:\n return False", "def find(self, key):\n visitor = VisitorFind()\n\n self.visit(key, visitor)\n\n return visitor.result", "async def call_find(\n self, peer: PeerID, keys: Collection[DHTID]\n ) -> Optional[\n Dict[\n DHTID, Tuple[Optional[ValueWithExpiration[Union[BinaryDHTValue, DictionaryDHTValue]]], Dict[DHTID, PeerID]]\n ]\n ]:\n keys = list(keys)\n find_request = dht_pb2.FindRequest(keys=list(map(DHTID.to_bytes, keys)), peer=self.node_info)\n try:\n async with self.rpc_semaphore:\n response = await self.get_stub(peer).rpc_find(find_request, timeout=self.wait_timeout)\n if response.peer and response.peer.node_id:\n peer_id = DHTID.from_bytes(response.peer.node_id)\n asyncio.create_task(self.update_routing_table(peer_id, peer, responded=True))\n assert len(keys) == len(response.results), \"DHTProtocol: response is not aligned with keys\"\n\n output = {} # unpack data depending on its type\n for key, result in zip(keys, response.results):\n key_bytes = DHTID.to_bytes(key)\n nearest = dict(\n zip(\n map(DHTID.from_bytes, result.nearest_node_ids),\n map(PeerID, result.nearest_peer_ids),\n )\n )\n\n if result.type == dht_pb2.NOT_FOUND:\n output[key] = None, nearest\n elif result.type == dht_pb2.FOUND_REGULAR:\n if not self._validate_record(\n key_bytes, self.IS_REGULAR_VALUE, result.value, result.expiration_time\n ):\n output[key] = None, nearest\n continue\n\n output[key] = ValueWithExpiration(result.value, result.expiration_time), nearest\n elif result.type == dht_pb2.FOUND_DICTIONARY:\n value_dictionary = self.serializer.loads(result.value)\n if not self._validate_dictionary(key_bytes, value_dictionary):\n output[key] = None, nearest\n continue\n\n output[key] = ValueWithExpiration(value_dictionary, result.expiration_time), nearest\n else:\n logger.error(f\"Unknown result type: {result.type}\")\n\n return output\n except Exception as e:\n logger.debug(f\"DHTProtocol failed to find at {peer}\", exc_info=True)\n asyncio.create_task(self.update_routing_table(self.routing_table.get(peer_id=peer), peer, responded=False))", "def find_first_regex_match(key, regex_candidates):\n for cand in regex_candidates:\n try:\n pattern = re.compile(BaseInterface.cap_match_string(cand))\n if pattern.match(key):\n return cand\n except:\n logging.warn('[ros_interface] Ignoring invalid regex string \"{0!s}\"!'.format(cand))\n\n return None", "def row_by_value(idl_, table, column, match, default=_NO_DEFAULT):\n tab = idl_.tables[table]\n for r in tab.rows.values():\n if getattr(r, column) == match:\n return r\n if default is not _NO_DEFAULT:\n return default\n raise None", "def match(self, route):\n regexp = re.sub(r\"\\{([a-zA-Z][^\\}]*)\\}\", r\"(?P<\\1>([^}]+))\", route)\n r = re.match(regexp, self.path)\n if r is not None:\n self.matchdict = r.groupdict()", "def search(self, string, match):\n\n if len(string) > 0:\n key = string[0]\n\n if key < self.ch:\n if self.left == 0:\n print \"No Match Found\"\n return\n self.left.search(string, match)\n\n elif key > self.ch:\n if self.right 
== 0:\n print(\"Not Match Found\")\n return\n self.right.search(string, match)\n\n else:\n if len(string) == 1:\n if self.flag == 1:\n print(\"Match \", match + self.ch)\n if self.center != 0:\n self.center.spdfs(match + self.ch + self.center.ch)\n return 1\n self.center.search(string[1:], match + key)\n\n else:\n print(\"Invalid String\")\n return", "def find(self, key):\n curr_node = self.head\n\n while curr_node is not None: # a normal traversal and checking first match\n if curr_node.data == key:\n return curr_node\n curr_node = curr_node.next\n\n return None", "def getMatchGroupDic(inputText, groupDict):\n\tfor key in groupDict.keys():\n\t\tif str(ord(inputText)) == str(key.unicode):\n\t\t\treturn groupDict\n\n\treturn None", "def _find_equivalent(searched_dict, dicts_list):\n for id_key in ('id', 'uid', 'name'):\n # Recognize the ID key used, if any\n local_id = searched_dict.get(id_key)\n if local_id:\n # Found an ID\n for other_item in dicts_list:\n if other_item.get(id_key) == local_id:\n # Found an item with the same ID\n return other_item\n \n # Found nothings\n return None", "def extract(dictionary: Any, key: Any) -> Union[Any, None]:\n if dictionary is None or not isinstance(dictionary, dict):\n return None\n return dictionary.get(key)", "def find_demand_id(demand_dict, vn_id, fvr_id, svr, nbr):\n #print vn_id, fvr_id, svr, nbr\n for demand_id in demand_dict:\n if vn_id == demand_dict[demand_id]['vn_id'] and \\\n fvr_id == demand_dict[demand_id]['fnode_id'] and \\\n svr == demand_dict[demand_id]['svr'] and \\\n nbr == demand_dict[demand_id]['nbr_id']:\n return demand_id", "def dict_find(in_dict, value):\n # Todo: make this robust to repeated values\n # Todo: make this robust to missing values\n return in_dict.keys()[in_dict.values().index(value)]", "def get_latest_match(self, steam_id):\n try:\n req = self.steam_api.get_match_history(account_id=steam_id, matches_requested=1)\n result = req['result']\n except:\n return None\n\n if result['status'] == 15:\n return {}\n\n elif result['num_results'] == 0:\n return {}\n\n return result['matches'][0]", "def fetch_next_match() -> Optional[MatchDict]:\n future_matches = Match.objects.filter(start_date_time__gt=timezone.now())\n\n if not any(future_matches):\n return None\n\n next_match = min(future_matches, key=lambda match: match.start_date_time)\n\n return {\n \"round_number\": next_match.round_number,\n \"season\": next_match.start_date_time.year,\n }", "def get_from_dict(d, k):\n try:\n return reduce(dict.get, k, d)\n except TypeError:\n # Value not found.\n return None", "def find_exact(self, **kwargs):\n results = list(self.find(**kwargs))\n if len(results) == 1:\n return results[0]\n return None", "def find_match(aunts: list, mfcsam: dict, fn_dict: dict={}) -> int:\n for idx, aunt in enumerate(aunts):\n match = True\n for name, val in mfcsam.items():\n # Ignore missing properties, and make sure that the ones that do exist\n # have matching values\n if name in aunt and not (fn_dict.get(name, lambda a, b : a == b))(val, aunt[name]):\n match = False\n break\n if match:\n return idx + 1\n return None", "def lookup(match):\n word = match.group(0)\n return symtab[unbase(word)] or word", "def case_insensitive_lookup_1(dictionary: dict, term: str) -> Optional[str]:\n key = term.lower()\n try:\n return dictionary[key.lower()]\n except KeyError:\n return None", "def find(self, target):\n try:\n if type(target) is int:\n for key, value in self.index.table.items():\n if value == target:\n return(key)\n elif type(target) is str:\n for key, 
value in self.index.table.items():\n if key == target:\n return(value)\n except Exception as error:\n print(f\"Error: self.find({target}) -> {error}\")", "def get_match_details(self, match_id=None, **kwargs):\n if 'match_id' not in kwargs:\n kwargs['match_id'] = match_id\n url = self.__build_url(urls.GET_MATCH_DETAILS, **kwargs)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)", "def _search(self, lookfor: typing.Any, stop_on_first: bool = False):\r\n\r\n # if the item is in the dictionary then just return it\r\n if self._dict_contains(lookfor):\r\n return True, lookfor, self._dict_getitem(lookfor), 1\r\n\r\n # set up the fuzzy matching tool\r\n ratio_calc = difflib.SequenceMatcher()\r\n ratio_calc.set_seq1(lookfor)\r\n\r\n # test each key in the dictionary\r\n best_ratio = 0\r\n best_match = None\r\n best_key = None\r\n for key in self:\r\n # if the current key is not a string\r\n # then we just skip it\r\n try:\r\n # set up the SequenceMatcher with other text\r\n ratio_calc.set_seq2(key)\r\n except TypeError:\r\n continue\r\n\r\n # we get an error here if the item to look for is not a\r\n # string - if it cannot be fuzzy matched and we are here\r\n # this it is definitely not in the dictionary\r\n try:\r\n # calculate the match value\r\n ratio = ratio_calc.ratio()\r\n except TypeError:\r\n break\r\n\r\n # if this is the best ratio so far - save it and the value\r\n if ratio > best_ratio:\r\n best_ratio = ratio\r\n best_key = key\r\n best_match = self._dict_getitem(key)\r\n\r\n if stop_on_first and ratio >= self.cutoff:\r\n break\r\n\r\n return best_ratio >= self.cutoff, best_key, best_match, best_ratio", "def checkDict(words, given_dict):\n\tcount = 0\n\tfor word in words:\n\t\tif word in given_dict:\n\t\t\tgiven_dict = given_dict[word]\n\t\telse:\n\t\t\treturn None\n\n\tif '$value' in given_dict:\n\t\treturn given_dict['$value'][0]\n\n\treturn given_dict", "def match(dictionary, query, policy='relaxed', matches=None):\n if query is None:\n return True\n assert policy in ['relaxed', 'strict'], \"\"\n\n for field, value in query.iteritems():\n if field not in dictionary:\n if policy == 'relaxed':\n continue\n else:\n return False\n if isinstance(value, list) or not isinstance(value, basestring):\n values = value if isinstance(value, list) else [value]\n if dictionary[field] not in values:\n return False\n if matches is not None:\n matches['%s_0' % (field)] = dictionary[field]\n else:\n if value == '':\n # Take special care if value is an empty string\n if value != dictionary[field]:\n return False\n elif matches is not None:\n matches['%s_0' % (field)] = dictionary[field]\n continue\n else:\n match = re.compile(value).match(dictionary[field])\n if not match:\n return False\n else:\n if matches is not None:\n matches['%s_0' % (field)] = dictionary[field]\n for index, group in enumerate(match.groups()):\n matches['%s_%d' % (field, index+1)] = group\n continue\n return True", "def matches(self, request: ServeRequest) -> Optional[Match]:\n for tag_key, tag_value in self.tags.items():\n request_value = None\n if tag_key == TAG_SERVE_MATCH_PATH:\n request_value = request.url\n elif tag_key == TAG_SERVE_MATCH_PATH_RELATIVE:\n request_value = request.url[1:]\n elif tag_key == TAG_SERVE_MATCH_HOST:\n request_value = request.headers.get('Host', '')\n elif tag_key.startswith(TAG_SERVE_MATCH_META):\n meta_key = tag_key.replace(TAG_SERVE_MATCH_META, '')\n 
request_value = request.headers.get(meta_key, '')\n LOGGER.debug(\"Checking '%s' against '%s'\", request_value, tag_value)\n regex = re.compile(tag_value)\n match = regex.match(request_value)\n if match is None:\n LOGGER.debug(\" => Not matching\")\n return None\n LOGGER.debug(\" => Matching, checking next tag\")\n return match", "def matches_dict(self, answer_dict):\n\n return self.matches(Answer(\n answer_dict['group_id'],\n answer_dict['block_id'],\n answer_dict['answer_id'],\n \"\",\n answer_dict['group_instance'],\n answer_dict['answer_instance'],\n ))", "def match(self, _str: str):\n result = self._regex.match(_str)\n if result:\n return result.groupdict() or True", "def find(self, value, key=\"name\", shallow=True):\n if key.lower() not in (\"name\", \"id\", \"path\"):\n raise ValueError()\n\n if not self.contents:\n self.get_contents()\n\n for content, level in self._iter_contents():\n if shallow and level != 0:\n continue\n if getattr(content, key, None) == value:\n return content\n\n self.project.app.logger.debug(\n \"{}: {} not found in '{}'\".format(key, value, self.name)\n )", "def find(self, **opts):\n negate = opts.get('negate', False)\n match_any = opts.get('match_any', negate)\n match_all = opts.get('match_all', not match_any)\n case_matters = opts.get('case_matters', False)\n reverse = opts.get('reverse', False)\n mapper = opts.get('map', lambda t: t)\n\n if callable(mapper):\n do_map = mapper\n elif is_string(mapper):\n\n def do_map(value):\n return getattr(value, mapper, None)\n elif isinstance(mapper, (list, tuple, set)):\n\n def do_map(value):\n return type(mapper)(getattr(value, t, None) for t in mapper)\n elif isinstance(mapper, dict):\n\n def do_map(value):\n return type(mapper)(\n (t, getattr(value, t, None)) for t in mapper)\n else:\n raise TypeError(\"Invalid mapping rule: %r\" % mapper)\n\n def gsense(f):\n\n def w(value, exp):\n if negate: return not f(value, exp)\n else: return f(value, exp)\n\n return w\n\n def tsense(f):\n\n def w(candidate, test_name, test_value):\n if test_name.startswith('not_'):\n return not f(candidate, test_name[4:], test_value)\n else:\n return f(candidate, test_name, test_value)\n\n return w\n\n @gsense\n def do_match(value, exp):\n if is_string(exp):\n if case_matters:\n return value == exp\n else:\n return value.lower() == exp.lower()\n elif hasattr(exp, 'search') and callable(exp.search):\n return exp.search(value)\n elif callable(exp):\n return bool(exp(value))\n elif exp in (None, True):\n return True # the attribute exists\n elif exp is False:\n return False # no such attribute\n\n # If we get here, the only other allowable option is\n # for exp to be a collection of things to try. 
If that\n # fails, the TypeError is propagated back to the caller.\n for e in iter(exp):\n if do_match(value, e):\n return True\n else:\n return False\n\n all_tests = []\n # Order matters here...\n\n for key in ('type', 'name', 'hasattr', 'hasattrs', 'attr', 'source',\n 'csource', 'innersource', 'partner', 'parent', 'predicate',\n 'value'):\n for k in (key, 'not_' + key):\n if k in opts: all_tests.append((k, opts[k]))\n\n # Choose candidates to search\n lo_limit = 0 # Search no index before this.\n hi_limit = len(self.wrappers) # Search no index at or after this.\n if 'search_after' in opts:\n lo_limit = opts['search_after'].obj_id + 1\n if 'search_before' in opts:\n hi_limit = opts['search_before'].obj_id\n if 'search_inside' in opts:\n t = opts['search_inside']\n p = t.partner\n if p is None:\n lo_limit = hi_limit = t.obj_id\n else:\n lo, hi = sorted((t.obj_id, p.obj_id))\n lo_limit = max(lo_limit, lo)\n hi_limit = min(hi_limit, hi)\n\n candidates = list(self.wrappers[i] for i in range(lo_limit, hi_limit))\n if reverse:\n candidates.reverse()\n\n @tsense\n def t_property(candidate, test_name, test_value):\n return (hasattr(candidate, test_name)\n and do_match(getattr(candidate, test_name), test_value))\n\n @tsense\n def t_associate(candidate, test_name, test_value):\n return (hasattr(candidate, test_name)\n and test_value(getattr(candidate, test_name))\n if callable(test_value) else\n getattr(candidate, test_name) is test_value)\n\n @tsense\n def t_has_attr(candidate, test_name, test_value):\n if test_name == 'hasattr':\n test_value = [test_value]\n\n if not hasattr(candidate, 'keys'):\n return False\n\n for tv in test_value:\n for key in candidate.keys():\n if do_match(key, tv):\n break\n else:\n return False\n else:\n return True\n\n @tsense\n def t_attr_match(candidate, test_name, test_value):\n if not hasattr(candidate, 'getattr'):\n return False\n elif isinstance(test_value, tuple):\n tv = [test_value]\n elif isinstance(test_value, dict):\n tv = test_value.items()\n else:\n raise TypeError(\"invalid key for attribute match: %s\" %\n test_value)\n\n for a_name, a_exp in tv:\n try:\n if not do_match(candidate[a_name].value, a_exp):\n return False\n except KeyError:\n if a_exp is not False:\n return False\n else:\n return True\n\n @tsense\n def t_predicate(candidate, test_name, test_value):\n return test_value(candidate)\n\n def t_fail(candidate, test_name, test_value):\n return False\n\n test_map = dict(\n type=t_property,\n not_type=t_property,\n name=t_property,\n not_name=t_property,\n hasattr=t_has_attr,\n not_hasattr=t_has_attr,\n hasattrs=t_has_attr,\n not_hasattrs=t_has_attr,\n attr=t_attr_match,\n not_attr=t_attr_match,\n source=t_property,\n not_source=t_property,\n csource=t_property,\n not_csource=t_property,\n innersource=t_property,\n not_innersource=t_property,\n value=t_property,\n not_value=t_property,\n parent=t_associate,\n not_parent=t_associate,\n partner=t_associate,\n not_partner=t_associate,\n predicate=t_predicate,\n not_predicate=t_predicate,\n )\n for candidate in candidates:\n ok = match_all\n\n for test_name, test_value in all_tests:\n tfunc = test_map.get(test_name, t_fail)\n ok = tfunc(candidate, test_name, test_value)\n\n if not match_all and ok: break\n if match_all and not ok: break\n\n if ok:\n yield do_map(candidate)", "def fromMatchId(cls, matchId):\n matchDict = CachedDB().getMatchById(matchId)\n if matchDict is None:\n return None\n return cls(matchDict)", "def match_by_id(self, match_id):\n\n url = API_PATH[\"match_by_id\"].format(\n 
region_url=self.region_url, match_id=match_id)\n\n response = requests.get(url, headers=self.headers)\n\n return response.json()", "def get_if_exist(self, data, key):\n if key in data:\n return data[key]\n return None", "def _lookup_model(self, name, experiment_dict):\n if experiment_dict.get(name) is None:\n return None\n return self._lookups[name][experiment_dict[name]]", "def _key_list_search(self, keys_list, lookup_dict):\n for index, key in enumerate(keys_list):\n result = nested_lookup(key, lookup_dict)\n try:\n value = nested_lookup(keys_list[index + 1], result)\n except IndexError:\n pass\n return value", "def _find(self, hashV):\n return self.table.search(hashV)", "def query_dict(dictionary: Mapping[Any, Any], keys: Sequence[Any]) -> Union[Any, None]:\n\n def extract(dictionary: Any, key: Any) -> Union[Any, None]:\n \"\"\"Get value associated with key, defaulting to None.\"\"\"\n if dictionary is None or not isinstance(dictionary, dict):\n return None\n return dictionary.get(key)\n\n return reduce(extract, keys, dictionary)", "def _dict_lookup(typingctx, d, key, hashval):\n resty = types.Tuple([types.intp, types.Optional(d.value_type)])\n sig = resty(d, key, hashval)\n\n def codegen(context, builder, sig, args):\n fnty = ir.FunctionType(\n ll_ssize_t,\n [ll_dict_type, ll_bytes, ll_hash, ll_bytes],\n )\n [td, tkey, thashval] = sig.args\n [d, key, hashval] = args\n fn = cgutils.get_or_insert_function(builder.module, fnty,\n 'numba_dict_lookup')\n\n dm_key = context.data_model_manager[tkey]\n dm_val = context.data_model_manager[td.value_type]\n\n data_key = dm_key.as_data(builder, key)\n ptr_key = cgutils.alloca_once_value(builder, data_key)\n cgutils.memset_padding(builder, ptr_key)\n\n ll_val = context.get_data_type(td.value_type)\n ptr_val = cgutils.alloca_once(builder, ll_val)\n\n dp = _container_get_data(context, builder, td, d)\n ix = builder.call(\n fn,\n [\n dp,\n _as_bytes(builder, ptr_key),\n hashval,\n _as_bytes(builder, ptr_val),\n ],\n )\n # Load value if output is available\n found = builder.icmp_signed('>', ix, ix.type(int(DKIX.EMPTY)))\n\n out = context.make_optional_none(builder, td.value_type)\n pout = cgutils.alloca_once_value(builder, out)\n\n with builder.if_then(found):\n val = dm_val.load_from_data_pointer(builder, ptr_val)\n context.nrt.incref(builder, td.value_type, val)\n loaded = context.make_optional_value(builder, td.value_type, val)\n builder.store(loaded, pout)\n\n out = builder.load(pout)\n return context.make_tuple(builder, resty, [ix, out])\n\n return sig, codegen", "def get_match_settings(\n key=None,\n ) -> MatchSettingsResponseBody:\n if config := AdditionalMatchSettingsConfig.get(str(key)):\n return MatchSettingsResponseBody(config)\n return bottle.abort(400, f\"No match_settings for pg_id {key} found\")", "def get_matching_item(self, mapping: MutableMapping[str, Any],\n ns_prefix: str = 'xmlns',\n match_local_name: bool = False) -> Optional[Any]:\n if self.name is None:\n return None\n elif not self.target_namespace:\n return mapping.get(self.name)\n elif self.qualified_name in mapping:\n return mapping[cast(str, self.qualified_name)]\n elif self.prefixed_name in mapping:\n return mapping[cast(str, self.prefixed_name)]\n\n # Try a match with other prefixes\n target_namespace = self.target_namespace\n suffix = f':{self.local_name}'\n\n for k in filter(lambda x: x.endswith(suffix), mapping):\n prefix = k.split(':')[0]\n if self.namespaces.get(prefix) == target_namespace:\n return mapping[k]\n\n # Match namespace declaration within value\n 
ns_declaration = '{}:{}'.format(ns_prefix, prefix)\n try:\n if mapping[k][ns_declaration] == target_namespace:\n return mapping[k]\n except (KeyError, TypeError):\n pass\n else:\n if match_local_name:\n return mapping.get(self.local_name) # type: ignore[arg-type]\n return None", "def find(self, seq):\n \n if not isinstance(seq, str):\n seqstr = ''\n # Concatenate list\n for s in seq:\n seqstr += self.prepseq(s.upper())\n else:\n seqstr = self.prepseq(seq.upper())\n \n searchstr = self.digest(seqstr)\n\n if searchstr in self.seqs:\n hits = self.seqs[searchstr]\n\n if seqstr in hits:\n return hits[seqstr]\n else:\n return None\n \n else:\n return None", "def _match(self, filename: str) -> Optional[dict]:\n if not self.named_regexp:\n self.log.warning(\n \"Regular expression not provided for plugin. Run with \"\n \"`--help-all` flag for more information.\"\n )\n return None\n\n match = re.match(self.named_regexp, filename)\n if not match or not match.groups():\n self.log.warning(\n \"Regular expression '{}' did not match anything in: {}\"\n \"\".format(self.named_regexp, filename)\n )\n return None\n\n gd = match.groupdict()\n self.log.debug(\n \"Regular expression '{}' matched\\n'{}' in: {}\"\n \"\".format(self.named_regexp, gd, filename)\n )\n return gd", "def find_match(people, STRs):\n for person in people:\n if compare_str(person, STRs):\n return person[\"name\"]\n return \"No match\"", "def find(lst, key, value):\n\n for i, dic in enumerate(lst):\n if dic[key] == value:\n return i\n return None", "def get_option(option_dict, option):\n if isinstance(option_dict, dict) and option in option_dict:\n return option_dict[option]\n elif not isinstance(option_dict, dict):\n return None\n else:\n for value in option_dict.values():\n result = SchedulePolicy.get_option(value, option)\n if result is not None:\n return result", "def find_key(dic, val): \n return [k for k, v in dic.iteritems() if re.search(v, val)]", "def search(self, key):\n if key in self.key_list:\n return (self.nodes)[key]\n return None", "def find(key, condition):\n for i, line in enumerate(lines):\n if condition(line, key):\n return i\n return None", "def get_from_dictionary(self,dictionary,key):\r\n try:\r\n return dictionary[key]\r\n except KeyError:\r\n raise RuntimeError(\"Dictionary does not contain key '%s'\" %key)", "def find_by_exact_match(self):\n while True: \n self.task_name_search = input(\"What is the keyword/s you are looking\"\n \" for? 
Press Q to quit to the main screen: \").strip()\n if self.task_name_search.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n x = self.dict_list\n return x\n self.find_by_exact_match_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(self.task_name_search, value):\n self.find_by_exact_match_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_exact_match_list)\n break\n self.del_or_edit()", "def match_phrase(self, phrase):\n key = None\n match_phrase = None\n for key in self.get_key_list(phrase):\n if key in self.cls_phrases:\n match_phrase = key\n break\n \n if match_phrase == None:\n return None, key\n return self.get_most_common(self.cls_phrases[match_phrase]), key", "def lookup(dic, key, *keys):\n if keys:\n return lookup(dic.get(key, {}), *keys)\n return dic.get(key)", "def get(self, key, def_value=None):\n\n index = self._get_hash(key)\n\n if self.table[index] is not None:\n for pair in self.table[index]:\n if key == pair[0]:\n return pair[1]\n\n if def_value is not None:\n return def_value\n\n raise ValueError(f\"can't find value with given key {key}\")", "def func(match, *args, **kwargs):\n\n data = match.group(0)\n return (resources[data], True) if data in resources else (data, False)", "def search_key(cls, key, element):\n if isinstance(element, dict):\n for k, v in element.items():\n if k == key:\n return v\n elif isinstance(v, dict):\n cls.search_key(key, v)\n elif isinstance(v, list):\n cls.search_key(key, v)\n elif isinstance(element, list):\n for obj in element:\n v = cls.search_key(key, obj)\n if v:\n return v", "def lookup(dic, key, *keys):\r\n if keys:\r\n return lookup(dic.get(key, {}), *keys)\r\n return dic.get(key)", "def search_hash(word_input,hashtable):\n\n if word_input in hashtable:\n return hashtable[word_input]\n else:\n return None", "def find(self, key):\n if self.head is None:\n return\n itr = self.head\n while itr:\n if itr.data == key:\n return itr.data\n itr = itr.next\n return None", "def _match(self, document: dict, query: dict) -> bool:\n\n matches = [\n self._match(document.get(key), value)\n if isinstance(value, dict) and isinstance(document.get(key), dict)\n else document.get(key) == value\n for key, value in query.items()\n ]\n return all(matches)" ]
[ "0.6650081", "0.64368933", "0.62785393", "0.6271919", "0.606207", "0.60218126", "0.6009384", "0.59157956", "0.5891576", "0.586902", "0.5831622", "0.58111554", "0.58071977", "0.580484", "0.57333195", "0.5707258", "0.5707258", "0.57004094", "0.5668807", "0.561176", "0.5579269", "0.5545683", "0.5541376", "0.5524781", "0.5491723", "0.5485185", "0.5477593", "0.5477031", "0.5449981", "0.54199696", "0.54196894", "0.5405206", "0.53847706", "0.5381589", "0.5362705", "0.53377056", "0.5336607", "0.5320419", "0.52796257", "0.52756554", "0.52414745", "0.5233859", "0.522122", "0.5218139", "0.5207426", "0.5206905", "0.52000886", "0.51999754", "0.5197119", "0.5187457", "0.5172239", "0.515656", "0.51530975", "0.51509225", "0.514524", "0.5141991", "0.5135422", "0.5113002", "0.510177", "0.51001674", "0.5098506", "0.5090382", "0.5090373", "0.50745314", "0.505291", "0.50510097", "0.50490904", "0.50476694", "0.50469816", "0.5015526", "0.5012335", "0.49847344", "0.4983519", "0.4980216", "0.49783805", "0.49764523", "0.49728903", "0.4971967", "0.49644974", "0.4964117", "0.4963304", "0.49520665", "0.4941253", "0.493734", "0.49341795", "0.49310932", "0.49291718", "0.49206796", "0.49047834", "0.48943067", "0.4889719", "0.4886103", "0.48855963", "0.48844397", "0.4881958", "0.48805845", "0.48762184", "0.4874606", "0.48691735", "0.48661977" ]
0.8009377
0
Utility function to populate key_matcher from self.records.
def _add_matches(self):
    for record in self.records:
        match_dict = {key_to_track: record.get(key_to_track) for key_to_track in self.key_matcher.keys()}
        self.key_matcher.add(obj=record, match_dict=match_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_source_key(self, match):\n raise NotImplementedError", "def __init__(self):\n self.key_to_record = {}\n self.mutation_to_key = {}\n self._innovation_key_generator = count(0)", "def test_toofewkeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", ())", "def init_record_fields(self, run_record_key, record_fields):\n\n record_fields_grp = self.settings_grp[RECORD_FIELDS]\n\n # make a dataset for the sparse fields allowed. this requires\n # a 'special' datatype for variable length strings. This is\n # supported by HDF5 but not numpy.\n vlen_str_dt = h5py.special_dtype(vlen=str)\n\n # create the dataset with the strings of the fields which are records\n record_group_fields_ds = record_fields_grp.create_dataset(run_record_key,\n (len(record_fields),),\n dtype=vlen_str_dt,\n maxshape=(None,))\n\n # set the flags\n for i, record_field in enumerate(record_fields):\n record_group_fields_ds[i] = record_field", "def initialize(self, keys: List[str]):", "def _make_match_key(self, firstname, lastname):\n return \"{}::{}\".format(firstname.lower().strip(), lastname.lower().strip())", "def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))", "def init_by_keys(cls, **query):\n raise NotImplementedError()", "def gen_keys():", "def __init__(self, matcher, generate):\n self.matcher = matcher\n self._generate = generate", "def __init__(self, key):\n self.key = [int_mapping(k) for k in key]", "def buildFromRecords(self, records):\n probes = {}\n for record in records:\n fields = {}\n for field in record.split(self.FIELD_DELIMITER):\n index = field.find(self.KEY_VALUE_DELIMITER)\n if index == -1 or len(field) < (index+1):\n raise InvariantViloation('detected invalid probe record in app info file - {}'.format(record))\n fields.update({field[:index]:field[index+1:]})\n if fields:\n try:\n fields[self.FIELD_FILE] = self.trimWorkspace(fields[self.FIELD_FILE], self.workspace)\n probes.update({\n fields[self.FIELD_RECORDER_RETURN_SITE] : AnchoredProbe(\n fields[self.FIELD_NAME], fields[self.FIELD_FILE], fields[self.FIELD_LINE],\n fields[self.FIELD_ATTRIBUTES], fields[self.FIELD_STATUS] == self.PROBE_STATUS_ENABLED,\n fields[self.FIELD_NAME]\n )\n })\n except KeyError as error:\n raise InvariantViloation('detected record missing field {} - \\n{}\\n{}'.format(error, record, fields))\n return probes", "def train_test_split(record_dictionary, ratio=.5):\n\n num_training_records = int(len(record_dictionary) * ratio)\n\n keys = list(record_dictionary.keys())\n\n training_records = np.random.choice(\n keys, num_training_records, replace=False)\n testing_records = [key for key in keys if key not in training_records]\n\n training_dictionary = {\n record: record_dictionary[record]\n for record in training_records\n }\n testing_dictionary = {\n record: record_dictionary[record]\n for record in testing_records\n }\n\n return training_dictionary, testing_dictionary", "def test_invalid_key_gen(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': 
'999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Ge1nder': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)", "def harmonize_keys(self):\n self._data.key_regex_replacements = _key_regex_replacements\n self._data.key_replacements = _key_replacements", "def _make_key(self, record_dict: Dict[str, Any]) -> int:\n return self._keys.setdefault(frozenset(record_dict.keys()), len(self._keys))", "def store_matches(\n self,\n parameter_handler: ParameterHandler,\n topology: \"Topology\",\n ) -> None:\n if self.key_map:\n # TODO: Should the key_map always be reset, or should we be able to partially\n # update it? Also Note the duplicated code in the child classes\n self.key_map: dict[\n Union[TopologyKey, LibraryChargeTopologyKey],\n PotentialKey,\n ] = dict()\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n topology_key = TopologyKey(atom_indices=key)\n potential_key = PotentialKey(\n id=val.parameter_type.smirks,\n associated_handler=parameter_handler.TAGNAME,\n )\n self.key_map[topology_key] = potential_key\n\n if self.__class__.__name__ in [\n \"SMIRNOFFBondCollection\",\n \"SMIRNOFFAngleCollection\",\n ]:\n valence_terms = self.valence_terms(topology)\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n assigned_terms=matches,\n topology=topology,\n valence_terms=valence_terms,\n )", "def test_keys(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'},\n 'a': 1,\n 'aa': 2,\n 'abc':3,\n 'hello':4}\n for key in keys_to_set:\n storage.set(key, keys_to_set[key])\n\n pattern_answers = {'?': ['1','2','3','4','a'],\n '*': list(keys_to_set.keys()),\n '[13]': ['1', '3'],\n '[^a]': ['1','2','3','4'],\n '[1-3]': ['1','2','3'],\n '?[ae]*': ['aa', 'hello']}\n for pattern in pattern_answers:\n self.assertEqual(pattern_answers[pattern],\n storage.keys(pattern), f'For pattern \"{pattern}\" expected {pattern_answers[pattern]}.')", "def __init__(self):\n self._keys = []\n self._sortKeys = []", "def key_lookup_batch(self, batchiter):\n pass", "def create_matcher(self):\n self.matcher = None\n if \"matcher\" in self.config:\n self.matcher = matcher.Matcher(self.config[\"matcher\"])\n else:\n self.matcher = matcher.TrueMatcher()\n \n self.use_fields_for_id = []\n if \"matcherfield\" in self.config:\n self.use_fields_for_id = self.config[\"matcherfield\"].split(\",\")\n \n if \"clear\" in self.config:\n self.clear_matcher = matcher.Matcher(self.config[\"clear\"])\n self.autoclear = self.auto_acknowledge\n else:\n self.clear_matcher = matcher.FalseMatcher()\n self.autoclear = False", "def store_matches(\n self,\n parameter_handler: ParameterHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n # TODO: Should the key_map always be reset, or should we be able to partially\n # update it? 
Also Note the duplicated code in the child classes\n self.key_map: dict[BondKey, PotentialKey] = dict() # type: ignore[assignment]\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n param = val.parameter_type\n if param.k_bondorder or param.length_bondorder:\n bond = topology.get_bond_between(*key)\n fractional_bond_order = bond.fractional_bond_order\n if not fractional_bond_order:\n assert self._get_uses_interpolation(parameter_handler)\n raise RuntimeError(\n \"Bond orders should already be assigned at this point\",\n )\n else:\n fractional_bond_order = None\n topology_key = BondKey(\n atom_indices=key,\n bond_order=fractional_bond_order,\n )\n\n potential_key = PotentialKey(\n id=val.parameter_type.smirks,\n associated_handler=parameter_handler.TAGNAME,\n bond_order=fractional_bond_order,\n )\n self.key_map[topology_key] = potential_key\n\n valence_terms = self.valence_terms(topology)\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n topology=topology,\n assigned_terms=matches,\n valence_terms=valence_terms,\n )", "def lookup(self, key):", "def __init__(self):\n super(KeyIterator, self).__init__()\n self.iterator = self.ValueIterator()", "def clean_up(self) -> None:\n self.single_device_matches = collections.defaultdict(\n lambda: collections.defaultdict(list)\n )", "def __init__(self, compound_matcher):\n self._matcher = compound_matcher", "def find_records(self, check, keys=None):\n matches = self._match(check)\n if keys:\n return [self._extract_subdict(rec, keys) for rec in matches]\n else:\n return matches", "def __init__(self):\n # map of (key, index in list)\n self.map = dict()\n \n # list of keys for random selection\n self.keys = []", "def test_keys_eq(self):\n self.assertListEqual(self.result, self.expected)", "def _rebuild_comparedict(self,\n result,\n rewrapped_columns,\n columns,\n rewrapped_keys,\n keys,\n missing_col):\n normalize = lambda x: x if (isinstance(x, str) or not x) else tuple(x)\n rewrapped_columns = normalize(rewrapped_columns)\n rewrapped_keys = normalize(rewrapped_keys)\n columns = normalize(columns)\n keys = normalize(keys)\n\n if rewrapped_keys == keys and rewrapped_columns == columns:\n if isinstance(result, CompareDict):\n key_names = (keys,) if isinstance(keys, str) else keys\n result.key_names = key_names\n return result # <- EXIT!\n\n try:\n item_gen = iter(result.items())\n except AttributeError:\n item_gen = [(self._missing, result)]\n\n if rewrapped_keys != keys:\n def rebuild_keys(k, missing):\n if isinstance(keys, str):\n return k\n key_dict = dict(zip(rewrapped_keys, k))\n return tuple(key_dict.get(c, missing) for c in keys)\n missing_key = self._missing\n item_gen = ((rebuild_keys(k, missing_key), v) for k, v in item_gen)\n\n if rewrapped_columns != columns:\n def rebuild_values(v, missing):\n if isinstance(columns, str):\n return v\n if not nonstringiter(v):\n v = (v,)\n value_dict = dict(zip(rewrapped_columns, v))\n return tuple(value_dict.get(v, missing) for v in columns)\n item_gen = ((k, rebuild_values(v, missing_col)) for k, v in item_gen)\n\n return CompareDict(item_gen, key_names=keys)", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def get_potential_matches_from_record(self, record):\n\n #Query against the con to the sqlite database\n 
def get_potential_matches(sub_tokens):\n\n search_tokens = \" \".join(sub_tokens)\n SQL = self.get_records_sql.format(search_tokens, self.max_results)\n df = pd.read_sql(SQL,self.target_con)\n return df\n\n tokens_orig = record.tokens_original_order\n tokens_ordered = record.tokens_specific_to_general_by_freq\n df_all = pd.DataFrame()\n\n\n for tokens in [tokens_orig, tokens_ordered]:\n for i in range(len(tokens)):\n\n sub_tokens = tokens[i:]\n if len(sub_tokens)<2:\n df= pd.DataFrame()\n break\n\n df = get_potential_matches(sub_tokens)\n\n if len(df)>0 and len(df)<self.max_results:\n df_all = pd.concat([df_all, df])\n break\n if len(df_all) > 0:\n df_all = df_all.drop_duplicates(\"auto_generated_row_id\")\n\n return self.df_to_record_objects(df_all)", "def setUp(self):\n # $TODO - expand supported field types\n # NOTE: For the purposes of this test, don't expect to compare 'f'-type\n # floats! The precision conversion using struct.pack -> struct.unpack\n # is problematic! Use 'd' instead because Python floats are actually\n # C-doubles. Use '=d' if necessary.\n int1 = -10\n uint1 = 20\n string1 = \"This is a string\" # pack this as a 30-byte string\n char1 = \"J\"\n short1 = 0\n float1 = 5.23\n double1 = -256.3456789107\n ushort1 = 5\n string2 = \"This is another string\" # pack this as a 30-byte string\n long1 = 2147483647\n ulong1 = 3000000000\n\n # Use the fields above in this order.\n self.fieldmap = \"iI30schddH30slL\"\n self.sourcekeys = (\"int1\", \"uint1\", \"string1\", \"char1\", \"short1\",\n \"float1\", \"double1\", \"ushort1\", \"string2\", \"long1\", \"ulong1\")\n\n # Create the raw data that getfields will parse\n self.rawdata = struct.pack(self.fieldmap, int1, uint1, string1, char1,\n short1, float1, double1, ushort1, string2, long1, ulong1)\n\n # This is what getfields should return\n self.knownvalues = {\"char1\":char1, \"short1\":short1, \"ushort1\":ushort1,\n \"int1\":int1, \"uint1\":uint1, \"long1\":long1, \"ulong1\":ulong1,\n \"float1\":float1, \"double1\":double1, \"string1\":string1,\n \"string2\":string2}", "def create_match(self, parser, fields):\n match = parser.OFPMatch()\n for (field, value) in fields.iteritems():\n match.append_field(field, value)\n return match", "def __init__(self):\n self.key2value = {}\n self.key2time = {}", "def secondary_keys_dicts(self):", "def store_matches(\n self,\n parameter_handler: ImproperTorsionHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n self.key_map = dict()\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n parameter_handler._assert_correct_connectivity(\n val,\n [\n (0, 1),\n (1, 2),\n (1, 3),\n ],\n )\n n_terms = len(val.parameter_type.k)\n for n in range(n_terms):\n smirks = val.parameter_type.smirks\n non_central_indices = [key[0], key[2], key[3]]\n\n for permuted_key in [\n (\n non_central_indices[i],\n non_central_indices[j],\n non_central_indices[k],\n )\n for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]\n ]:\n topology_key = ImproperTorsionKey(\n atom_indices=(key[1], *permuted_key),\n mult=n,\n )\n potential_key = PotentialKey(\n id=smirks,\n mult=n,\n associated_handler=\"ImproperTorsions\",\n )\n self.key_map[topology_key] = potential_key", "def test_fromkeys(self):\n d = SplayDict.fromkeys(['a', 'b', 'c'], 1)\n self.assertIn('a' , d)\n self.assertIn('b' , d)\n self.assertIn('c' , d)\n self.assertEqual(d['a'] , 1)\n self.assertEqual(d['b'] , 1)\n self.assertEqual(d['c'] , 1)", "def __init__(self, map):\n self.map = map\n self._map = _map = {}\n for key in 
map.keys():\n _map[key] = key\n self.fast_validate = (10, _map, self.validate)", "def setup_known_fields(self):\n\n kfields = dict(self.known_fields)\n freg = re.compile(r\"(^.+)_\\d+$\")\n for field in self:\n if self[field].get(\"units\") is not None:\n continue\n\n if field in kfields:\n self[field][\"units\"] = kfields[field]\n continue\n\n fs = freg.search(field)\n if fs and fs.groups()[0] in kfields:\n self[field][\"units\"] = kfields[fs.groups()[0]]", "def _match(self, check):\n matches = []\n tests = {}\n for k, v in check.items():\n if isinstance(v, dict):\n tests[k] = CompositeFilter(v)\n else:\n tests[k] = lambda o: _add_tz(o) == _add_tz(v)\n\n for rec in self._records.values():\n if self._match_one(rec, tests):\n matches.append(deepcopy(rec))\n return matches", "def prepare_key (self, key, for_seq):\n r_key = \"%s:%d:%s\" % (self.classkey, for_seq, key)\n return r_key", "def _init_run_records_field(self, run_idx, run_record_key,\n field_name, field_shape, field_dtype):\n\n record_grp = self.run(run_idx)[run_record_key]\n\n # check if it is variable length\n if field_shape is Ellipsis:\n # make a special dtype that allows it to be\n # variable length\n vlen_dt = h5py.special_dtype(vlen=field_dtype)\n\n # this is only allowed to be a single dimension\n # since no real shape was given\n dset = record_grp.create_dataset(field_name, (0,), dtype=vlen_dt,\n maxshape=(None,))\n\n # its not just make it normally\n else:\n # create the group\n dset = record_grp.create_dataset(field_name, (0, *field_shape), dtype=field_dtype,\n maxshape=(None, *field_shape))\n\n return dset", "def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def __init_matcher(self):\n\t\tmatcher = Matcher()\n\t\tmatcher.set_mentors(self.mentors.find())\n\t\tmatcher.set_migrants(self.migrants.find())\n\t\tprint(matcher.migrants)\n\t\t\n\t\treturn matcher", "def __init__(self):\n self.ds = set()\n self.keys = []", "def _prepare_keys(self, keys):\n # sorting is guaranteed to enable comparisons throughout the class\n for key in keys:\n key.sort()\n\n # ensure no overlap with the primary key\n if self._primary_key in keys:\n keys.remove(self._primary_key)\n\n return keys", "def __init__(self, *values):\n if (len(values) == 1) and (type(values[0]) in SequenceTypes):\n values = values[0]\n self.values = values[:]\n self.values_ = values_ = {}\n for key in values:\n values_[key] = key\n self.fast_validate = (10, values_, self.validate)", "def __init__(self, *args, **kwargs):\n defaultdict.__init__(self)\n self.sortedkeys = None", "def reinit (self):\n #for name, obj in inspect.getmembers (self):\n ## if isinstance (obj, RField):\n # self.keyvals[name] = obj.default\n inspect.getmembers (self)", "def __init__(self, key):\n self.key = key", "def from_items(cls, items):\n new_match = Match()\n new_match.fields = [None] * len(cls.indices)\n for key, val in items:\n try:\n idx = cls.name_to_idx[key]\n new_match.fields[idx] = cls.conversions_to_bin[idx](val)\n except:\n # unknown values\n pass\n return new_match", "def _match(self) -> None:\n self.matched = [i for i in self.data if self.match(i)]\n self.unmatched = [i for i in self.data if not self.match(i)]", "def __init__(self,\n allowRepetition):\n self.__fileName = \"\"\n self.__allowRepetition = allowRepetition\n self.__keyObjs = {}\n self.__keyList = []\n self.__keyCount = {}\n self.__trailer 
= []", "async def build_key(self):\n self.key = self.update_key()\n search_key_result = await self.redis.keys(self.key)\n if len(search_key_result) == 1:\n self.key = search_key_result[0]\n if self.id == \"*\":\n self.id = self.key.split(':')[0]\n if self.value == \"*\":\n self.value = self.key.split(':')[-1]\n return True", "def data_comparison(observations, records, record):\n for observation in observations:\n if observation != \"_id\":\n try:\n if re.search(observation, f\"{records[record]}\"):\n if not re.search(\n observations[observation], f\"{records[record]}\"\n ):\n records[record] = (\n f\"{records[record]}\"\n + \" --> \"\n + observations[observation]\n )\n except Exception as ex:\n Common.logger.warning(f\"Exception happened in data comparison {ex}\")\n return records", "def _eq_key(self):\n return (\n self.message_type,\n self.topic,\n self.schema_id,\n self.payload,\n self.uuid,\n self.timestamp,\n self.upstream_position_info,\n self.kafka_position_info,\n self.dry_run,\n self.encryption_type\n )", "def MakeKey(self, string, string_1, string_2):\n ...", "def __init__ (self, id, finder, matches):\n\t\tself.id = id\n\t\t# self.inCitesName = fullname\n\t\tfor attr in ['fullname', 'firstName', 'middleName', 'lastName', 'note']:\n\t\t\tsetattr (self, attr, getattr (finder, attr))\n\t\tself.matches = matches\n\t\tself.numMatches = len(matches)", "def __init__(self, replacer, lookup, field=1, skipper=None):\n if isinstance(replacer, str):\n replacer = re.compile(replacer)\n if isinstance(skipper, str):\n skipper = re.compile(skipper)\n\n if not replacer.groups:\n raise DictReplacerError(\"Invalid replacer pattern: no groups \"\n \"specified - '%s'\" % replacer.pattern)\n\n self._replacer = replacer\n self._lookup = lookup\n self._field = field\n self._skipper = skipper", "def test_invalid_key_ID(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'kjhID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)", "def test_normal_fields_can_be_defined_between_primary_keys(self):", "def process_data(self):\n num_records = len(self.records_data)\n for i in range(len(self.keys)):\n student_key = self.keys[i]\n if (i < num_records):\n self._load_student_record(student_key,\n self.records_data[i])", "def set_keys(cls, val):\n keys = []\n check = lambda dict, key, default_val: dict[key] if key in dict.keys() else default_val\n for i in 
range(val['row_max']):\n keys.append([check(val, '{}_{}'.format(x, y), cls.NOTAVAILABLE) \\\n for x, y in list(product([i], list(range(val['col_max']))))])\n if not hasattr(cls, '_keys'):\n cls._keys = {val['name'] : keys}\n else:\n cls._keys[val['name']] = keys", "def AddMapEntryMatcherFlagsToParser(parser):\n is_primary_flag = base.Argument(\n '--set-primary',\n help='The certificate will be used as the default cert if no other certificate in the map matches on SNI.',\n action='store_true')\n hostname_flag = base.Argument(\n '--hostname',\n help='A domain name (FQDN), which controls when list of certificates specified in the resource will be taken under consideration for certificate selection.'\n )\n group = base.ArgumentGroup(\n help='Arguments to configure matcher for the certificate map entry.',\n required=True,\n mutex=True,\n category=base.COMMONLY_USED_FLAGS)\n group.AddArgument(is_primary_flag)\n group.AddArgument(hostname_flag)\n group.AddToParser(parser)", "def generate_from_keyed_source(self, keyed_source, key):\n\n for item in keyed_source:\n line = item[key]\n self.item_lookup[line] = item\n poem = self.add_line(line)\n if poem:\n yield tuple(self.item_lookup[k] for k in poem)", "def __init__(self, *args):\n self.args = args\n self.matchers = []\n for a in args:\n if a is _:\n a = lambda k: True\n elif isinstance(a, basestring):\n a = a.__eq__\n elif isinstance(a, (list, tuple, set)):\n a = (lambda ary: (lambda k: k in ary))(a)\n elif hasattr(a, 'search'):\n a = a.search\n else:\n a = str(a).__eq__\n self.matchers.append(a)", "def test_fields_to_dict(self):\r\n test_data = \\\r\n \"\"\"0\tR27DLI_4812\tR27DLI_600\tR27DLI_727\tU1PLI_403\tU1PLI_8969\tU1PLI_9080\tU1PLI_9526\tW3Cecum_6642\tW3Cecum_8992\r\n1\tU1PLI_7889\r\n2\tW3Cecum_4858\r\n3\tR27DLI_3243\tR27DLI_4562\tR27DLI_6828\tR27DLI_9097\tU1PLI_2780\tU1PLI_67\tU9PSI_10475\tU9PSI_4341\tW3Cecum_5191\"\"\".splitlines() # output from cd-hit\r\n obs = fields_to_dict(test_data)\r\n exp = {\r\n '0': ['R27DLI_4812', 'R27DLI_600', 'R27DLI_727', 'U1PLI_403',\r\n 'U1PLI_8969', 'U1PLI_9080', 'U1PLI_9526', 'W3Cecum_6642', 'W3Cecum_8992'],\r\n '1': ['U1PLI_7889'],\r\n '2': ['W3Cecum_4858'],\r\n '3': ['R27DLI_3243', 'R27DLI_4562', 'R27DLI_6828', 'R27DLI_9097', 'U1PLI_2780', 'U1PLI_67', 'U9PSI_10475', 'U9PSI_4341', 'W3Cecum_5191']}\r\n self.assertEqual(obs, exp)", "def check_keys(self):", "def test_init_hash(self):\n bill = Bill(self.input_hash)\n for key, value in self.input_hash.iteritems():\n self.assertEqual(value, bill.__dict__[key])", "def parse_record(self, record):\n data = defaultdict(list)\n\n for trait, parser in self.parsers:\n for field_name in self.search_fields:\n field = record.get(field_name)\n if not field:\n continue\n parsed = parser.parse(field, field_name)\n if parsed:\n data[trait] += parsed\n\n return data", "def __init__(self, **fields):\r\n \r\n self._by_number = []\r\n self._names = []\r\n self._by_name = {}\r\n self._numbers = {}\r\n \r\n for name in sorted(fields.keys()):\r\n self.add(name, fields[name])", "def __init__(self, aKey):\n self.key = aKey\n\n # CRC can be used to validate a key (very roughly)\n # if you store the CRC from a previous keyword\n # and then compare with a newly generated one and\n # they are the same then chances are the keyword\n # is correct - only a single byte so not that reliable\n self.crc = 0 \n for x in self.key:\n intX = ord(x)\n self.crc = self.crc ^ intX", "def test_initialize_log_data(self):\r\n\r\n ids_bcs_added_field = {('AAAA', ''): 's1', ('TTTT', ''): 's2'}\r\n 
actual_log_data = initialize_log_data(ids_bcs_added_field)\r\n\r\n expected_log_data = {'TTTT,s2': 0, 'AAAA,s1': 0}\r\n\r\n self.assertEqual(actual_log_data, expected_log_data)\r\n\r\n # Handles added demultiplex field data\r\n ids_bcs_added_field = {('AAAA', '1'): 's1', ('TTTT', '2'): 's2'}\r\n actual_log_data = initialize_log_data(ids_bcs_added_field)\r\n\r\n expected_log_data = {'TTTT,2,s2': 0, 'AAAA,1,s1': 0}\r\n\r\n self.assertEqual(actual_log_data, expected_log_data)", "def _update_key(cls, spec):\n if cls.KEY is not None:\n cls._set_key(spec, spec[\"keys\"].popleft())\n elif cls.REF is not None:\n spec[\"ref\"] = cls.REF", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n self.strand, self.position_before, self.position_after)\n return all_position_values", "def subStringMatchExact(target,key):\r\n index = find(target,key)\r\n #print 'here',target,key,index\r\n if index < 0 or len(key) <= 0 or len(target) <= 0:\r\n return ()\r\n matches = subStringMatchExact(target[index+len(key):len(target)],key)\r\n offset = index + len(key)\r\n temp_matches = ()\r\n #print matches\r\n if matches:\r\n for x in range(0, len(matches)) :\r\n temp_matches += ((matches[x] + offset),)\r\n #matches.insert(0,index)\r\n temp_matches = (index,) + temp_matches\r\n return temp_matches", "def test_build_similar_keys_list():\n from FindSimilarIncidentsV2 import build_incident_fields_query\n\n int_res = build_incident_fields_query({u'sla': 0})\n assert int_res == ['sla:=0']\n\n str_res = build_incident_fields_query({u'employeeid': u'1111'})\n assert str_res == [u'employeeid=\"1111\"']\n\n list_res = build_incident_fields_query({u'test': [u'name1', 0]})\n assert list_res == [u'test=\"name1\"', 'test:=0']\n\n list_res = build_incident_fields_query({u'test': []})\n assert list_res == ['test=[]']\n\n escape_res = build_incident_fields_query({u'test': u'\"C:\\\\test\\\\escape\\\\sequence\" test'})\n assert escape_res == [u'test=\"\\\\\"C:\\\\test\\\\escape\\\\sequence\\\\\" test\"']", "def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. 
K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def keysWhichMatch(cls, *args):\n if len(cls.keys) < len(args) > 0:\n raise ValueError('Number of keys provided is too long.\\n'\n 'Len Class Keys: %s\\n'\n 'Len Provided Keys: %s\\n' % (len(cls.keys), len(args)))\n\n index = 0\n output = cls.db_key_tuples()\n\n for keyToCheck in args:\n temp = []\n for key in output:\n if key[index] == keyToCheck:\n temp.append(key)\n\n index += 1\n output = temp\n\n return output", "def test_init_with_field_dict(self):\n fields = {\n 'Column 1': 'a=${aaa}',\n 'Column 2': 'b=${bbb}',\n 'Column 3': 'c=${ccc}',\n }\n csv_formatter = CSVFormatter(fields=fields)\n csv = csv_formatter.format_records(self.records)\n\n csv_expected = textwrap.dedent(\"\"\"\\\n #Column 1,Column 2,Column 3\n a=foobar_01,b=8,c=4898FE19\n a=foobar_02,b=160,c=5825D187\n a=foobar_03,b=99,c=3648A436\n \"\"\")\n\n assert csv == csv_expected", "def current_source_key(self, match):\n raise NotImplementedError", "def init(self, datasetInfo):\n #use user's dataset directory if not specified\n if datasetInfo is None: raise RecognizerError(\"No dataset file specified.\")\n \n if os.path.isfile(datasetInfo):\n datasetInfos = [datasetInfo]\n elif os.path.isdir(datasetInfo):\n datasetInfos = [os.path.join(datasetInfo, f) for f in os.listdir(datasetInfo) \\\n if f.endswith('.xml')]\n elif isinstance(datasetInfo, (types.ListType, types.TupleType)):\n datasetInfos = datasetInfo\n else: raise RecognizerError, \"Unknown datasetInfo type: %s\" % type(datasetInfo)\n \n patterns = {}; startsWiths = []; matchers = {}\n for f in datasetInfos:\n info, ns = getXmlEtree(f)\n if not ns.has_key('_'): ns['_'] = ns['_default']\n for dataset in info:\n \n #skip if comment\n if isinstance(dataset, lxml.etree._Comment): continue\n \n ipath = xpath(dataset, './/_:ipath/text()', ns)\n fileTemplate = xpath(dataset, './/_:fileTemplate/text()', ns)\n startsWith = fileTemplate[0:fileTemplate.index('$')]\n filePattern = xpath(dataset, './/_:filePattern/text()', ns)\n if matchers.has_key(startsWith):\n matchers[startsWith].append((re.compile(filePattern), ipath, dataset, ns))\n else: matchers[startsWith] = [(re.compile(filePattern), ipath, dataset, ns)]\n if startsWith != '': startsWiths.append(startsWith)\n startsWithPattern = r'(' + '|'.join(startsWiths) + ')'\n startsWithMatcher = re.compile(startsWithPattern)\n return startsWithMatcher, matchers", "def __init__(__self__, *,\n key: pulumi.Input[str],\n values: pulumi.Input[Sequence[pulumi.Input[str]]]):\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"values\", values)", "def create_mock_key_provider(key_list):\n public_keys = {}\n for k in key_list:\n public_keys[k] = {'Fingerprint': k,\n 'Value': 'ffaa00'}\n key_provider = mock.Mock()\n key_provider.get_public_keys.return_value = public_keys\n return key_provider", "def filter_record_keys(record_list, whitelist_keys):\n\n filtered = [{k: v for k, v in [y for y in list(x.items()) if y[0] in whitelist_keys]} for x in record_list]\n return filtered", "def __init__(self, key):\n Base.__init__(self, key)", "def __init__(self, key):\n Base.__init__(self, key)", "def test_mutate_field(self):\n # Test adding a field\n with self.assertRaises(ValueError):\n self.email.add_field('', '')\n\n self.email.add_field(self.key, self.regex)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n 
self.assertTrue(found_key)\n self.assertEqual(found_regex, self.regex)\n\n # Test getting a field\n with self.assertRaises(LookupError):\n self.email.get_field('')\n\n field = self.email.get_field(self.key)\n self.assertEqual(\n field, {'key': self.key, 'regex': self.regex, 'value': []})\n\n # Test removing a field\n with self.assertRaises(LookupError):\n self.email.remove_field('')\n\n self.email.remove_field(self.key)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertFalse(found_key)\n self.assertNotEqual(found_regex, self.regex)", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def make_fromkey(self, key):\n if key != '':\n def make_runable(match):\n return \"self.components['\" + match.group(0) + \"']\"\n\n runable = re.sub('[^\\(\\)|& ]+', make_runable, key)\n return eval(runable)\n else:\n return ~np.zeros(self.size, dtype=bool)", "def build_match_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = string\r\n answer['match'] = tmp\r\n return answer", "def _newKey(self, key):\n pass", "def __init__(self, keys_to_track):\r\n self.keys_to_track = keys_to_track\r\n self.tracker = {}\r\n for key_to_track in self.keys_to_track:\r\n self.tracker[key_to_track] = {}", "def test_keyword_extractor(self):\n data = [{\"Header\": \"This is a Header\", \"Paragraph\": \"This is a Paragraph\", \"slide\": 10}]\n keywords = keyword_extractor(data)\n data[0][\"Header_keywords\"] = [\"header\"]\n data[0][\"Paragraph_keywords\"] = [\"paragraph\"]\n self.assertEqual(keywords, data)", "def _build_mysql_dict(self, user_list):\n return_dict = {}\n for user in user_list:\n user_key = self._make_match_key(firstname=user['firstname'],\n lastname=user['lastname'])\n assert user_key not in return_dict\n return_dict[user_key] = {'mysql_user': user,\n 'friven_matched_users': []\n }\n return return_dict", "def _convert(self, dictlike):\n for incoming_key, valuelist in util.dictlike_iteritems(dictlike):\n for value in valuelist:\n new_key = self.keyfunc(value)\n if incoming_key != new_key:\n raise TypeError(\n \"Found incompatible key %r for value %r; this \"\n \"collection's \"\n \"keying function requires a key of %r for this value.\" % (\n incoming_key, value, new_key))\n yield value", "def key_by(self, field: str) -> B[B, E]:\n pass", "def _initialize_parser_keys(self):\n self.parser.source_role_marker = TRANSFER_ROLES.SOURCE\n self.parser.target_role_marker = TRANSFER_ROLES.TARGET\n rack_shape_agg = get_root_aggregate(IRackShape)\n rack_shape_agg.filter = None\n self.parser.allowed_rack_dimensions = [(rs.number_rows,\n rs.number_columns)\n for rs in rack_shape_agg]" ]
[ "0.5777237", "0.5721886", "0.5531455", "0.548444", "0.54103255", "0.54072803", "0.5384197", "0.53711194", "0.5370351", "0.53101146", "0.5296817", "0.52691966", "0.5252932", "0.5242916", "0.5215903", "0.52110225", "0.5207979", "0.5202271", "0.5188836", "0.5164202", "0.51598597", "0.5157279", "0.5147622", "0.51181424", "0.510944", "0.5092469", "0.50793564", "0.50649875", "0.5019659", "0.5011922", "0.5005509", "0.5005509", "0.4994515", "0.49610457", "0.49519533", "0.4945018", "0.4940992", "0.49165928", "0.49037254", "0.48960716", "0.48906964", "0.48875195", "0.48806816", "0.48740357", "0.4865057", "0.48603588", "0.48464397", "0.48437712", "0.48321792", "0.482676", "0.48220468", "0.4817586", "0.48161396", "0.4800049", "0.47986498", "0.47886783", "0.478852", "0.47819808", "0.47798026", "0.47790894", "0.4775895", "0.47650188", "0.4763051", "0.47599944", "0.47488913", "0.4743635", "0.4742833", "0.47421935", "0.47321174", "0.47289124", "0.47262996", "0.4719688", "0.4718317", "0.47132707", "0.47089222", "0.47053817", "0.4702147", "0.4701925", "0.47011286", "0.46795857", "0.46774808", "0.4675527", "0.4672947", "0.46729293", "0.4672556", "0.4667284", "0.46642166", "0.46601832", "0.46601832", "0.46546543", "0.46529356", "0.4647483", "0.46435666", "0.46431467", "0.46392596", "0.46281937", "0.46236205", "0.46202135", "0.4619189", "0.46188742" ]
0.7259984
0
Check if the origin_imgs are flipped correctly.
def _check_flip(origin_imgs, result_imgs): h, w, c = origin_imgs.shape for i in range(h): for j in range(w): for k in range(c): if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_flip(origin_imgs, result_imgs, flip_type):\n n, _, _, _ = np.shape(origin_imgs)\n if flip_type == 'horizontal':\n for i in range(n):\n if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):\n return False\n else:\n # yapf: disable\n for i in range(n):\n if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501\n return False\n # yapf: enable\n return True", "def reversible(self) -> bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col", "def test_flip_vertical() -> None:\n original = create_image(3, 2)\n set_color(original, 0, 0, create_color(0, 0, 0))\n set_color(original, 1, 0, create_color(90, 90, 90))\n set_color(original, 2, 0, create_color(255, 255, 255))\n set_color(original, 0, 1, create_color(10, 10, 10))\n set_color(original, 1, 1, create_color(0, 0, 0))\n set_color(original, 2, 1, create_color(90, 90, 90))\n \n expected = create_image(3, 2)\n set_color(expected, 0, 0, create_color(10, 10, 10))\n set_color(expected, 1, 0, create_color(0, 0, 0))\n set_color(expected, 2, 0, create_color(90, 90, 90))\n set_color(expected, 0, 1, create_color(0, 0, 0))\n set_color(expected, 1, 1, create_color(90, 90, 90))\n set_color(expected, 2, 1, create_color(255, 255, 255))\n \n flipped_vertical = flip_vertical(original)\n \n for x, y, col in flipped_vertical: # tests each colour of each pixel of the filtered sample image and compares it to the expected image\n check_equal('Checking pixel @(' + str(x) + ', ' + str(y) + ')', col, get_color(expected, x, y))", "def verify_legal_rotation(self, direction):\n test_figure = None\n if direction == \"CW\":\n test_figure = self.get_block_positions(self.active_piece.get_cw_rotation())\n elif direction == \"CCW\":\n test_figure = self.get_block_positions(self.active_piece.get_ccw_rotation())\n\n for b_x, b_y in test_figure:\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True", "def drone_has_flipped(self, current_orientation):\n has_flipped = True\n\n self.max_roll = rospy.get_param(\"/drone/max_roll\")\n self.max_pitch = rospy.get_param(\"/drone/max_pitch\")\n\n rospy.logwarn(\"#### HAS FLIPPED? 
########\")\n rospy.logwarn(\"RPY current_orientation\"+str(current_orientation))\n rospy.logwarn(\"max_roll\"+str(self.max_roll) +\n \",min_roll=\"+str(-1*self.max_roll))\n rospy.logwarn(\"max_pitch\"+str(self.max_pitch) +\n \",min_pitch=\"+str(-1*self.max_pitch))\n rospy.logwarn(\"############\")\n\n if current_orientation.x > -1*self.max_roll and current_orientation.x <= self.max_roll:\n if current_orientation.y > -1*self.max_pitch and current_orientation.y <= self.max_pitch:\n has_flipped = False\n\n return has_flipped", "def checkImages(self):\r\n\r\n self.leftImage, self.rightImage, res = self.receiver.getImageData()\r\n\r\n return res", "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "def _compare_jpg_decode_with_pil(test_case, images, print_debug_info=False):\n of_decoded_images = _of_image_decode(images)\n pil_images = [Image.open(image) for image in images]\n # convert image to BGR\n pil_decoded_images = [np.array(image)[:, :, ::-1] for image in pil_images]\n\n for of_decoded_image, pil_decoded_image in zip(\n of_decoded_images, pil_decoded_images\n ):\n of_decoded_image = of_decoded_image.squeeze()\n test_case.assertTrue(len(of_decoded_image.shape) == 3)\n test_case.assertTrue(len(pil_decoded_image.shape) == 3)\n\n diff = of_decoded_image - pil_decoded_image\n diff_index = np.where(diff != 0)\n diff_abs_values = diff[diff_index]\n\n if print_debug_info:\n print(\"of_decoded_image:\\n\", of_decoded_image, of_decoded_image.shape)\n print(\"pil_decoded_image:\\n\", pil_decoded_image, pil_decoded_image.shape)\n print(\"diff_index:\\n\", diff_index)\n print(\"diff_abs_values:\\n\", diff_abs_values)\n print(\n \"of_decoded_image diff:\\n\",\n of_decoded_image[diff_index[0], diff_index[1]],\n )\n print(\n \"pil_decoded_image diff:\\n\",\n pil_decoded_image[diff_index[0], diff_index[1]],\n )\n\n # only green channel has difference of 1\n test_case.assertTrue(np.all(diff_index[-1] == 1))\n test_case.assertTrue(np.all(diff_abs_values == 1))", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def checkForced(self, source, forced):\n self.assertEqual(source.get(\"flux.naive\"),\n self.image.get(self.x, self.y) if forced else self.image.get(self.xcen, self.ycen))", "def _check_same_fov(*args, **kwargs):\n raise_error = kwargs.pop(\"raise_error\", False)\n for i, arg in enumerate(args):\n kwargs[f\"img_#{i}\"] = arg\n errors = []\n for (a_name, a_img), (b_name, b_img) in itertools.combinations(\n kwargs.items(), 2\n ):\n if not a_img.shape[:3] == b_img.shape[:3]:\n errors.append((a_name, b_name, \"shape\"))\n if not np.allclose(a_img.affine, b_img.affine):\n errors.append((a_name, b_name, \"affine\"))\n if len(errors) > 0 and raise_error:\n raise ValueError(\n \"Following field of view errors were detected:\\n\"\n + \"\\n\".join(\n [\n f\"- {e[0]} and {e[1]} do not have the same {e[2]}\"\n for e in errors\n ]\n )\n )\n return len(errors) == 0", "def readout_flipped(self, iamp):\n flipped = ct.c_int()\n self.lib.IsReadoutFlippedByAmplifier(ct.c_int(iamp),\n ct.pointer(flipped))\n return bool(flipped.value)", "def ff_correct_image(image):\n pass", "def ff_correct_image(image):\n pass", "def invertible(self):\n a = self._data\n return a.shape[0] == a.shape[1] and np.linalg.matrix_rank(a) == a.shape[0]", "def __check_type__(im, ft_axes, orig, name, real_axis=0, shift_axes=[]):\n if type(orig) == image.image:\n im = im.view(image.image) # note: view casting -> this is not the 
viewer!\n if type(orig.name) is str:\n im.name = name + ' of ' + orig.name\n im.info = orig.info\n pxs = []\n\n for a in ft_axes:\n if a not in orig.spectral_axes:\n im.spectral_axes += [a]\n im.shift_axes = shift_axes\n if type(orig.unit) is str:\n im.unit = ''\n for i in range(im.ndim):\n if i in ft_axes:\n if name == 'IRFT' and real_axis == i:\n pxs += [1 / (orig.pixelsize[i] * 2 * (orig.shape[i] - 1))]\n else:\n pxs += [1 / (orig.pixelsize[i] * orig.shape[i])]\n if type(orig.unit) is str:\n im.unit += orig.unit + '^-1 '\n else:\n try: # TODO: FIX THIS!!!\n pxs += [orig.pixelsize[i]]\n except:\n print('Error in setting pixel size')\n if type(orig.unit) is str:\n im.unit += orig.unit + ' '\n im.pixelsize = pxs\n return (im)\n else:\n return (im)\n\n # ifft shift", "def __compareImage(self, file1, file2):\n # arg=self.__validateString(str_arg)\n # file1, file2=arg.split(' ', 1)\n try:\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n if img1.size != img2.size:\n return False\n by1 = img1.tobytes()\n by2 = img2.tobytes()\n # format r,g,b,255,r,g,b,255, 3 bytes = 1 point, 255=separator, total 4 bytes\n l = len(by1) / 4\n # total points and same points\n tp = 0\n sp = 0\n for j in range(l):\n i = j * 4\n tp += 1\n if by1[i] == by2[i] and by1[i + 1] == by2[i + 1] and by1[i + 2] == by2[i + 2]:\n sp += 1\n # max to 2% diff allowed\n if tp * 0.98 > sp:\n return False\n else:\n return True\n except Exception, e:\n printLog(self.threadName + \"Exception in __compareImage: %s\" % e.message, logging.ERROR)\n traceback.print_exc()\n return False\n finally:\n img1 = None\n img2 = None", "def _check_consistency_between_imaging_extractors(self):\n return True", "def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None", "def _check_fov(img, affine, shape):\n img = check_niimg(img)\n return img.shape[:3] == shape and np.allclose(img.affine, affine)", "def is_orientation_ok(image,k=2,is_first=True):\n\n mid_x, mid_y = int(0.5*image.shape[1]), int(0.5*image.shape[0])\n\n # Get moment for first body half \n image_0 = np.array(image)\n image_0[:,:int(mid_x)] = 0\n image_0 = image_0[:,int(mid_x):]\n moment_0 = get_moment(image_0,k)\n\n # Get moment for second body half\n image_1 = np.array(image)\n image_1[:,int(mid_x):] = 0\n image_1 = np.fliplr(image_1)\n image_1 = image_1[:,int(mid_x):]\n moment_1 = get_moment(image_1,k)\n\n # Compute descriminant and flip flag\n discrim = (moment_0 - moment_1)/(moment_0 + moment_1)\n if discrim < 0:\n ok = False\n else:\n ok = True \n return ok, discrim", "def check_conv_transpose(extract):\n call = extract\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"nn.relu\":\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"clip\":\n if call.attrs[\"a_min\"] != 0.0 or call.attrs[\"a_max\"] != 6.0:\n return False\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n\n while call.op.name != \"nn.conv2d_transpose\":\n call = call.args[0]\n\n attrs = call.attrs\n if attrs.data_layout != \"NCHW\":\n return False\n\n return True", "def check_image_invert(image_data, border_width = 30):\n \n _, avg_intensity_borders, avg_intensity_inside = \\\n _auto_threshold_borders(image_data,border_width = border_width);\n \n # if image borders are darker 
than the mean image, it's a surface tension\n # image:\n if(avg_intensity_inside > avg_intensity_borders):\n return False;\n # else, it's a shadowgraph image:\n else:\n return True;", "def _check_origin(self, origin):\n try:\n cachevol_props = self.zfssa.get_volume(origin)\n except exception.VolumeNotFound:\n LOG.debug('Origin %s does not exist', origin)\n return\n\n numclones = cachevol_props['numclones']\n LOG.debug('Number of clones: %d', numclones)\n if numclones <= 1:\n # This cache vol does not have any other clone\n self.zfssa.delete_file(origin)\n else:\n cachevol_props = {'numclones': six.text_type(numclones - 1)}\n self.zfssa.set_file_props(origin, cachevol_props)", "def _check_inverse_transform(self, Z):\n Z_round_trip = self.inverse_func(self.func(Z))\n if not np.allclose(Z_round_trip, Z, equal_nan=True):\n raise UserWarning(\n \"The provided functions are not strictly\"\n \" inverse of each other. If you are sure you\"\n \" want to proceed regardless, set\"\n \" 'check_inverse=False'.\"\n )", "def problem1():\n\n img = load_image(\"data/a1p1.png\")\n display_image(img)\n\n save_as_npy(\"a1p1.npy\", img)\n\n img1 = load_npy(\"a1p1.npy\")\n display_image(img1)\n\n img2 = mirror_horizontal(img1)\n display_image(img2)\n\n display_images(img1, img2)", "def assert_data_correct(self) -> bool:\n if not self.training_folder.exists():\n return False\n # 27: number of characters\n # 27*2: 27 original font characters and 27 folders with morphed version\n if len(list(self.training_folder.iterdir())) not in [27, 27 * 2]:\n return False\n # assert that each character folder has the expected number of images inside\n # expected number is repetitions + original, or just original if no morphing\n # took place\n for directory in self.training_folder.iterdir():\n img_count = len(list(directory.iterdir()))\n if img_count != self.repetitions + 1 and img_count != 1:\n return False\n return True", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def _correct_images(images):\n # From the MNIST website: \"Pixels are organized row-wise. Pixel values are 0 to 255. 
0 means\n # background (white), 255 means foreground (black).\"\n # The dataset does not transform the image such that 255 is black, so do that here.\n dtype = _assert_dtype(images)\n max_val = 255 if dtype == dtypes.uint8 else 1.0\n return max_val - images", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n return True", "def flip(imgs):\n x = random.choice([-1, 0, 1, 2])\n if x == 2:\n return imgs\n else:\n return [cv2.flip(img, x) for img in imgs]", "def mirrorImage(self):\n\n im = Image.open(self.ActivePhoto)\n out = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n out.save(self.ActivePhoto)\n self.photo.setPixmap(QtGui.QPixmap(self.ActivePhoto))\n print (\"Flipped image\")", "def _is_mapping_correct(self):\n for i in range(self.mapping_size):\n target = self.mapping[i]\n if target < 0:\n continue\n if target == i // 2:\n continue\n return False\n return True", "def testInverted(self):\n invertedClass = xyTransformRegistry[\"inverted\"]\n invertedConfig = invertedClass.ConfigClass()\n affineClass = xyTransformRegistry[\"affine\"]\n invertedConfig.transform.retarget(affineClass)\n affineConfig = invertedConfig.transform\n affineConfig.translation = (1.2, -3.4)\n with lsst.utils.tests.getTempFilePath(\".py\") as filePath:\n self.checkConfig(invertedClass, invertedConfig, filePath)\n inverted = invertedClass(invertedConfig)\n self.checkBasics(inverted)\n for fromPoint in self.fromIter():\n toPoint = inverted.forwardTransform(fromPoint)\n predToPoint = fromPoint - \\\n Extent2D(*invertedConfig.transform.translation)\n for i in range(2):\n self.assertAlmostEqual(toPoint[i], predToPoint[i])", "def flip_images(x, y):\n flip_i = random.sample(range(x.shape[0]), int(x.shape[0] / 2))\n x[flip_i] = x[flip_i, :, ::-1, :]\n y[flip_i] = -y[flip_i]\n\n return x, y", "def compare(actor, frame):\n urlretrieve(actor, 'actor.jpg')\n\n with open(\"frame_image.jpg\", 'wb') as frame_image:\n frame_image.write(base64.b64decode(frame[23:]))\n\n actor_encoding = face_encodings(load_image_file('actor.jpg'))[0]\n frame_encoding = face_encodings(load_image_file('frame_image.jpg'))\n\n os.remove('actor.jpg')\n os.remove('frame_image.jpg')\n for encoding in frame_encoding:\n if compare_faces([actor_encoding], encoding):\n return True\n return False", "def assert_orientation_landscape_image_is_correct(self, rendition):\n\n from willow.plugins.pillow import PillowImage\n\n with rendition.get_willow_image() as willow_image:\n image = PillowImage.open(willow_image)\n # Check that the image is the correct size (and not rotated)\n self.assertEqual(image.get_size(), (600, 450))\n # Check that the red flower is in the bottom left\n # The JPEGs have compressed slightly differently so the colours won't be spot on\n colour = image.image.convert(\"RGB\").getpixel((155, 282))\n self.assertAlmostEqual(colour[0], 217, delta=25)\n self.assertAlmostEqual(colour[1], 38, delta=25)\n self.assertAlmostEqual(colour[2], 46, delta=25)\n\n # Check that the water is at the bottom\n colour = image.image.convert(\"RGB\").getpixel((377, 434))\n self.assertAlmostEqual(colour[0], 85, delta=25)\n self.assertAlmostEqual(colour[1], 93, delta=25)\n self.assertAlmostEqual(colour[2], 65, delta=25)", "def is_origin(self) -> bool:\n return self.x == 0 and self.y == 0", "def hasMoved(self):\r\n if BLENDER_MODE == 'BGE':\r\n world_tranform = self.obj.worldTransform.copy()\r\n elif BLENDER_MODE == 'BPY':\r\n world_tranform = 
self.obj.matrix_world.copy()\r\n\r\n # if objed has not yet been checked\r\n if not self.old_worldTransform:\r\n self.old_worldTransform = world_tranform\r\n return True\r\n\r\n elif self._areDifferent_Mat44(world_tranform, self.old_worldTransform, self.moveThresholdLoc, self.moveThresholdRot):\r\n # moved since last check\r\n self.old_worldTransform = world_tranform\r\n return True\r\n else:\r\n # did not move since last check\r\n return False", "def check_improvement_direction(self): # pragma: no cover\n good = self.good_rev.mean_value\n bad = self.bad_rev.mean_value\n\n if self.is_return_code_mode():\n if good == 1 or bad == 0:\n self._set_failed_return_code_direction_results()\n return False\n return True\n\n direction = self.improvement_direction\n if direction is None:\n return True\n if (bad > good and direction > 0) or (bad < good and direction < 0):\n self._set_failed_direction_results()\n return False\n return True", "def _array_is_aligned(self):\n rot_matrix = self.axes_wcs.wcs.pc\n return np.allclose(rot_matrix, np.eye(self.axes_wcs.wcs.naxis))", "def test_on_skimage_png(self):\n from_skimage = diffread(TEST_PNG)\n\n self.assertTupleEqual(from_skimage.shape, (256, 256))\n self.assertTrue(np.allclose(from_skimage, np.ones_like(from_skimage)))", "def __lt__(self, img):\r\n ordering = self.config['algorithm_ordering']\r\n ordering = ordering[1:] if ordering.startswith('-') else ordering\r\n\r\n if ordering == \"filename\":\r\n return sorted([self.filename, img.filename])[0] == img.filename\r\n if ordering == 'width':\r\n return self.absolute_width <= img.absolute_width\r\n elif ordering == 'height':\r\n return self.absolute_height <= img.absolute_height\r\n elif ordering == 'area':\r\n return self.absolute_width * self.absolute_height <= img.absolute_width * img.absolute_height\r\n else:\r\n return max(self.absolute_width, self.absolute_height) <= max(img.absolute_width, img.absolute_height)", "def flipped_dimensions(transformation, size):\n dim = len(size)\n # transform start point\n start = [0.0] * dim\n transformed_start = transformation.TransformPoint(start)\n flipped = [False] * dim\n for i in range(dim):\n # set current end point and transform it\n end = [0.0] * dim\n end[i] = size[i]\n transformed_end = transformation.TransformPoint(end)\n # check, if transformed_start and transformed_end changed position\n flipped[i] = transformed_start[i] > transformed_end[i]\n return flipped", "def assert_data_fragments_correct(self) -> bool:\n read_path = Path(os.environ[\"DATA_PATH\"]) / \"fragments\"\n if not read_path.exists():\n return False\n bin_images = [img for img in read_path.iterdir() if \"binarized\" in img.name]\n if len(bin_images) == 0:\n return False\n return True", "def assert_image_equal(path1, path2):\n test_im = np.asarray(Image.open(path1))\n ref_im = np.asarray(Image.open(path2))\n npt.assert_array_equal(test_im, ref_im)", "def reverseCurves(self):\n self.data.reverse()\n return True", "def is_non_inverting(self):\n\n return False", "def _infer_direction(self):\n data = self.get_data(None)\n if data is not None:\n # Infer the direction from the data\n if data._size > 1:\n data = data[0:2].array\n return bool(\n data.item(\n 0,\n )\n < data.item(\n 1,\n )\n )\n # --- End: if\n\n # Still here?\n data = self.get_bounds_data(None)\n if data is not None:\n # Infer the direction from the bounds\n b = data[(0,) * (data.ndim - 1)].array\n return bool(\n b.item(\n 0,\n )\n < b.item(\n 1,\n )\n )\n\n # Still here? 
Then infer the direction from the units.\n return not self.Units.ispressure", "def victory_checker() -> bool:\r\n conflict_check()\r\n for x in range(shape):\r\n for y in range(shape):\r\n if conflict_space[x, y] != 0:\r\n return False\r\n if separation_crawler(False):\r\n return False\r\n return True", "def check_if_mirror_extended(self, lqs):\n if lqs.size(1) % 2 == 0:\n lqs_1, lqs_2 = torch.chunk(lqs, 2, dim=1)\n if torch.norm(lqs_1 - lqs_2.flip(1)) == 0:\n self.is_mirror_extended = True", "def check_if_original(article):\n num_img = len(article.find_all(\"img\"))\n return num_img < 2", "def test_inverse_transform(self):", "def testDegenerate(self):\n srt = asarray(self.copy())\n srt.sort(axis=1)\n return (srt[:,:-1] == srt[:,1:]).any(axis=1)", "def equals(self, image: 'BaseImage') -> bool:\n assert isinstance(image, BaseImage)\n im1 = pygame.image.tostring(self._surface, 'RGBA')\n im2 = pygame.image.tostring(image._surface, 'RGBA')\n return im1 == im2", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def is_different(image1, image2):\n gray1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)\n gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)\n\n (score, diff) = compare_ssim(gray1, gray2, full=True)\n diff = (diff * 255).astype(\"uint8\")\n\n thresh = cv2.threshold(diff, 0, 255,\n cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\n return bool(cnts)", "def test__inverse_transform_continuous(self):", "def _check_integrity(self):\n\n count = 0\n for (x, y) in self.__players[ChessGame.BLACK].union(\n self.__players[ChessGame.WHITE]):\n assert (x, y) in self.__board\n count += 1\n\n assert count == len(self.__board)", "def is_solved(self):\n colors = ['green', 'blue', 'red', 'orange', 'white', 'yellow']\n for row in range(3):\n for column in range(3):\n if self.front[row][column] != colors[0]:\n return False\n for row in range(3):\n for column in range(3):\n if self.back[row][column] != colors[1]:\n return False\n for row in range(3):\n for column in range(3):\n if self.right[row][column] != colors[2]:\n return False\n for row in range(3):\n for column in range(3):\n if self.left[row][column] != colors[3]:\n return False\n for row in range(3):\n for column in range(3):\n if self.up[row][column] != colors[4]:\n return False\n for row in range(3):\n for column in range(3):\n if self.down[row][column] != colors[5]:\n return False\n return True", "def save(self, data, filename, tporigin=None):\n if self.save_as is not None:\n ending = self.save_as\n else:\n try:\n ending = os.path.splitext(self.maskfiles[0])[-1]\n except Exception as e:\n ending = os.path.splitext(self.featurefiles[0])[-1]\n # if ending in ['.mhd']:\n # skio.imsave(filename + ending, data, plugin='simpleitk')\n if ending in ['.raw']:\n data.astype('int16').tofile(filename + ending)\n elif ending in ['.png', '.jpeg', '.png', '.pgm', '.pnm', '.gif', '.tif', '.tiff']:\n if np.max(data) <= 1.0 and np.min(data) >= 0:\n np.int8(np.clip(data * 256, 0, 255))\n imsave(filename + ending, data.squeeze())\n else:\n # ending 
in ['.nii', '.hdr', '.nii.gz', '.gz', '.dcm'] or len(data.squeeze().shape) > 2:\n if self.correct_orientation and tporigin is not None:\n # we corrected the orientation and we have the information to undo our wrongs, lets go:\n aligned_data = Volume(data, np.eye(4), \"RAS\") # dummy initialisation if everything else fails\n try:\n tporigin_vol = ni.open_image(os.path.join(tporigin, self.maskfiles[0]), verbose=False)\n except:\n try:\n tporigin_vol = ni.open_image(os.path.join(tporigin, self.featurefiles[0]), verbose=False)\n except Exception as e:\n logging.getLogger('data').warning('could not correct orientation for file {} from {}'\n .format(filename, tporigin))\n logging.getLogger('data').debug('because {}'.format(e))\n try:\n aligned_vol = Volume(data, tporigin_vol.aligned_transformation, tporigin_vol.system)\n aligned_data = aligned_vol.copy_like(tporigin_vol)\n except Exception as e:\n logging.getLogger('data').warning('could not correct orientation for file {} from {}'\n .format(filename, tporigin))\n logging.getLogger('data').debug('because {}'.format(e))\n\n finally:\n ni.save_volume(filename + \".nii.gz\", aligned_data, True)\n else:\n if self.correct_orientation:\n logging.getLogger('data').warning(\n 'could not correct orientation for file {} since tporigin is None: {}'\n .format(filename, tporigin))\n nib.save(nib.Nifti1Image(data, self.affine), filename + \".nii.gz\")", "def check_for_inversion(cxr_img_norm):\n\n # Define image rim\n rim_thickness = max(np.shape(cxr_img_norm)) // 20\n rim_array = [\n list(cxr_img_norm[:rim_thickness, :].flatten()),\n list(cxr_img_norm[:, :rim_thickness].flatten()),\n list(cxr_img_norm[-rim_thickness:, :].flatten()),\n list(cxr_img_norm[:, -rim_thickness:].flatten())]\n\n rim_list = [pixel for rim in rim_array for pixel in rim]\n\n # Compare mean of rim to mean of whole image\n img_mean = np.mean(cxr_img_norm)\n rim_mean = np.mean(np.array(rim_list))\n\n inversion_check = (rim_mean > img_mean)\n\n return inversion_check", "def test_array_orientation_consistency_tilt():\n samples = 128\n p = FringeZernike(Z2=1000, samples=samples)\n ps = PSF.from_pupil(p, 1)\n idx_y, idx_x = np.unravel_index(ps.data.argmax(), ps.data.shape) # row-major y, x\n assert idx_x == ps.center_x\n assert idx_y > ps.center_y", "def correct(img_src, shift, destfile, crop=None):\n (y0, y1, x0, x1) = crop\n (shift_y, shift_x) = shift\n tf_shift = transform.SimilarityTransform(translation=[shift_x, shift_y])\n img = transform.warp(img_src, tf_shift)\n res = img[-y0:-y1, -x0:-x1, :]\n io.imsave(destfile, res)\n return destfile", "def _correct_image(self, image):\n return Image(image.data, self._rotation_offset, self._dx, self._dy, self._timestamp).data", "def testTranslate(self):\n (w,h) = self.im8_1.getSize()\n \n self.im8_1.reset()\n self.im8_1.setPixel(128, (w//2,h//2))\n \n for i in range(10):\n xi = random.randint(-w//4,w//4)\n yi = random.randint(-h//4,h//4)\n translate(self.im8_1, self.im8_2, xi, yi, 0)\n self.im8_3.reset()\n self.im8_3.setPixel(128, (w//2+xi,h//2+yi))\n (x,y) = compare(self.im8_2, self.im8_3, self.im8_2)\n self.assertTrue(x<0)\n translate(self.im8_1, self.im8_2, -xi, -yi, 0)\n self.im8_3.reset()\n self.im8_3.setPixel(128, (w//2-xi,h//2-yi))\n (x,y) = compare(self.im8_2, self.im8_3, self.im8_2)\n self.assertTrue(x<0)", "def rotated(self):\n return self.pol_lat != 90.", "def test_random_vertical_flip(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = 
RandomVerticalFlip(prob=1)\n _image, _label = transform(image, label)\n _image, _label = transform(_image, _label)\n assert (image == _image).all()\n assert (label == _label).all()\n \n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = RandomVerticalFlip(prob=1)\n _image, _label = transform(image, label)\n _image, _label = transform(_image, _label)\n assert (image == _image).all()\n assert (label == _label).all()", "def collate_fn_flip(self, batch):\n FT = torch.FloatTensor\n img, uvd_gt = zip(*batch)\n flip = random.randint(1, 10000)%2\n # Do flipping\n # 0 = left, 1 = right\n hand_side = 1\n if flip:\n hand_side = 0 \n\n new_img = []\n new_uvd = []\n for i, u in batch:\n if flip:\n i = i.transpose(Image.FLIP_LEFT_RIGHT)\n u[:, 0] = 0.999 - u[:, 0]\n i = np.asarray(i)\n i = i/255.0\n i = IMG.imgshape2torch(i)\n new_img.append(i)\n new_uvd.append(u)\n \n new_img = FT(new_img)\n new_uvd = FT(new_uvd)\n return new_img, new_uvd, hand_side", "def equals(self, other: InputTransform) -> bool:\n return super().equals(other=other) and (self.reverse == other.reverse)", "def isinvertible(self):\n if np.all(np.abs(self.maroots) > 1):\n return True\n else:\n return False", "def diff_image(images):\n prev_image = cv2.absdiff(images[0], images[1])\n cur_image = cv2.absdiff(images[1], images[2])\n return cv2.bitwise_and(prev_image, cur_image)", "def _hasChangedDirection(motionPts: list) -> tuple:\n dispPts = Ball._getDisplacements(motionPts)\n xDir = yDir = None\n xChange = yChange = False\n for dispPt in dispPts:\n # Compute differences\n xDirNow = RIGHT if dispPt[0] > 0 else LEFT\n yDirNow = DOWN if dispPt[1] > 0 else UP\n # Look for x changes\n if xDir is None:\n xDir = xDirNow\n elif xDirNow != xDir:\n xChange = True\n # Look for y changes\n if yDir is None:\n yDir = yDirNow\n elif yDirNow != yDir:\n yChange = True\n return xChange, yChange", "def is_new_based_on_imgs(soup):\n\n \n \n prev_hashes = get_prev_img_hashes()\n temp_hashes = get_temp_img_hashes(soup)\n\n if len(temp_hashes.difference(prev_hashes))>0:\n print(\"new, based on images\")\n return True\n else:\n return False", "def _check_normalization(self):\n lastDistance = None\n distance = None\n for idx in xrange(len(self) - 1):\n distance = self[idx+1][0] - self[idx][0]\n\n # first run\n if lastDistance is None:\n lastDistance = distance\n continue\n\n if lastDistance != distance:\n return False\n\n lastDistance = distance\n\n return True", "def _check_orig(self):\n if self.is_dir():\n self._orig = False\n return\n\n parts = self._path.split('.')\n try:\n if parts[-1] == 'tgz':\n self._orig = True\n elif parts[-2] == 'tar':\n if (parts[-1] in Compressor.Opts or\n parts[-1] in Compressor.Aliases):\n self._orig = True\n except IndexError:\n self._orig = False", "def is_inverse(self, other):\n return (self * other).is_identity() and (other * self).is_identity()", "def img_compare(file1, file2):\n # read image\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n\n # resize \n size = 128, 128\n img1_res = img_resize(img1, size)\n img2_res = img_resize(img2, size)\n\n img1_res.save(\"img_1.thumbnail\", \"JPEG\")\n img2_res.save(\"img_2.thumbnail\", \"JPEG\")\n\n # convert to gray scale\n img1_grayscale = img1_res.convert('LA')\n img1_grayscale.save(\"img_1_grayscale.png\")\n\n img2_grayscale = img2_res.convert('LA')\n img2_grayscale.save(\"img_2_grayscale.png\")\n\n # normalise\n img1_norm = normalize(np.array(img1_grayscale.getdata()).astype(float))\n 
img2_norm = normalize(np.array(img2_grayscale.getdata()).astype(float))\n\n try:\n # compare two images\n diff = img1_norm - img2_norm\n m_norm = sum(abs(diff)) # Manhattan norm\n z_norm = norm(diff.ravel(), 0) # Zero norm\n\n # print(\"Manhattan norm:\", m_norm, \"/ per pixel:\", m_norm/img1_norm.size)\n # print(\"Zero norm:\", z_norm, \"/ per pixel:\", z_norm*1.0/img1_norm.size)\n\n return m_norm/img1_norm.size, float(z_norm) / img1_norm.size\n except:\n return 100, 100", "def can_rotate(self, current_piece):\n rotated_shape = current_piece.template[current_piece.next_rotation()] \n for x in range(pieces.Piece.TEMPLATE_WIDTH):\n for y in range(pieces.Piece.TEMPLATE_HEIGHT):\n board_x = current_piece.get_pos_x() + x\n board_y = current_piece.get_pos_y() + y \n if board_x < 0 and rotated_shape[y][x]:\n return False\n if board_y < 0 and rotated_shape[y][x]:\n return False\n if board_x >= game_config.BOARD_BOX_COUNT_X:\n return False\n if board_y >= game_config.BOARD_BOX_COUNT_Y:\n return False\n if self.board.get_cell(board_x, board_y) and rotated_shape[y][x]:\n return False\n return True", "def _diff_images(img_before, img_after):\n width_before, height_before = img_before.size\n width_after, height_after = img_after.size\n data_before = img_before.getdata()\n data_after = img_after.getdata()\n\n width, height = max(width_before, width_after), max(height_before, height_after)\n offset_ax = (width - width_before) // 2\n offset_ay = (height - height_before) // 2\n offset_bx = (width - width_after) // 2\n offset_by = (height - height_after) // 2\n\n diff = 0\n for y in range(height):\n for x in range(width):\n ax, ay = x - offset_ax, y - offset_ay\n bx, by = x - offset_bx, y - offset_by\n if (ax < 0 or bx < 0 or ax >= width_before or bx >= width_after or\n ay < 0 or by < 0 or ay >= height_before or by >= height_after):\n diff += 1\n else:\n if data_before[ax + ay *width_before] != data_after[bx + by * width_after]:\n diff += 1\n try:\n return round(diff / float(width * height), 4)\n except ZeroDivisionError:\n return 0.0", "def is_symetric(self):\n for x, y in _triangle_range(self.size):\n\n if self.read(x, y) != self.read(y, x):\n return False\n\n return True", "def __flip(self, image, landmarks, run_prob=0.5):\n if np.random.rand() < run_prob:\n return image, landmarks\n image = np.fliplr(image)\n landmarks[:, 0] = image.shape[1] - landmarks[:, 0]\n landmarks = LandmarkHelper.flip(landmarks, landmarks.shape[0])\n return image, landmarks", "def check_2x2_solved(self):\n return self._grid[0][0] == 0 and self._grid[0][1] == 1 \\\n and self._grid[1][0] == self._width*1 and self._grid[1][1] == (1 + self._width * 1)", "def is_versor(self) -> bool:\n return np.isclose(np.linalg.norm(self.A), 1.0)", "def mirror(img):\n return img[:, ::-1]", "def is_versor(self) -> np.ndarray:\n return np.isclose(np.linalg.norm(self.array, axis=1), 1.0)", "def assert_data_correct(self) -> bool:\n corr_char = self.assert_data_characters_correct()\n print(\"Character data correct?\", corr_char)\n corr_font = FontImages().assert_data_correct()\n print(\"Font data correct?\", corr_font)\n corr_frag = self.assert_data_fragments_correct()\n print(\"Fragment data correct?\", corr_frag)\n corr_train_aug = self.assert_train_augmented()\n print(\"Train data augmented?\", corr_train_aug)\n truth_agree = corr_char and corr_font and corr_frag\n return True if truth_agree else False", "def verify(self):\n if not self.verify_succ_pred_fingers():\n return False\n\n return True", "def compare_faces(encodings, encoding_to_check):\n\n 
return list(np.linalg.norm(encodings - encoding_to_check, axis = 1))", "def test_flip():\n template_r = np.array([\n [0.5, 0],\n [0.7, 0],\n ])\n template_g = np.array([\n [0.9, 0],\n [0.2, 0],\n ])\n template_b = np.array([\n [0.1, 0],\n [0.4, 0],\n ])\n template = np.dstack([template_r, template_g, template_b])\n return template, np.flipud(np.fliplr(template))", "def collate_fn_no_flip(self, batch):\n FT = torch.FloatTensor\n img, uvd_gt = zip(*batch)\n\n new_img = []\n new_uvd = []\n for i, u in batch:\n if self.pred_img_side == 'left':\n i = i.transpose(Image.FLIP_LEFT_RIGHT)\n u[:, 0] = 0.999 - u[:, 0]\n i = np.asarray(i)\n i = i/255.0\n i = IMG.imgshape2torch(i)\n new_img.append(i)\n new_uvd.append(u)\n \n new_img = FT(new_img)\n new_uvd = FT(new_uvd)\n return new_img, new_uvd", "def _compositions_swapped(self, thermo):\n assert self._ref_indicators is not None\n\n indicators = self._singlet_comparison(thermo)\n\n for list1, list2 in zip(indicators, self._ref_indicators):\n comp_swapped = True\n for ind1, ind2 in zip(list1, list2):\n if ind1 == ind2:\n comp_swapped = False\n if comp_swapped:\n return True\n return False", "def assertWarp(self):\n if self.rect.size.height != 256 or self.rect.width != 128:\n raise ValueError(\"Bad image size for body warped image\")\n if self.format != self.format.R8G8B8:\n raise ValueError(\"Bad image format for warped image, must be R8G8B8\")", "def test_inverse_order_true(self):\n percentiles_cube = self.test_cube.copy(\n data=np.flipud(self.test_cube.data))\n plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n self.assertTrue(plugin_instance.inverse_ordering)", "def check_scaled_shape(orig: torch.Tensor, scaled: torch.Tensor, scale_factor: float) -> bool:\n N, C, H, W = orig.shape\n Hc = int(scale_factor * H)\n Wc = int(scale_factor * W)\n\n return scaled.shape == (N, C, Hc, Wc)", "def chk_vert_sym(self):\n for j in xrange(self.width):\n for i in xrange(int(round(self.height/2))):\n if self.rows[i][j] == self.rows[self.height - (i + 1)][j]:\n continue\n else:\n return False\n return True", "def is3DImage(self):\n\t\treturn self.is3D" ]
[ "0.7891626", "0.7111249", "0.588529", "0.5814684", "0.579397", "0.5752564", "0.5637771", "0.5609224", "0.5603559", "0.5602122", "0.55922", "0.5581342", "0.5511366", "0.5511366", "0.5508238", "0.54916257", "0.545488", "0.54529166", "0.5440112", "0.5437185", "0.5426509", "0.54239786", "0.5414612", "0.5376516", "0.5331621", "0.53161705", "0.5309192", "0.52966225", "0.52963984", "0.52750075", "0.52750075", "0.52750075", "0.52750075", "0.52602804", "0.5240879", "0.524012", "0.52373064", "0.5233345", "0.52282894", "0.52218115", "0.519064", "0.5178225", "0.5173376", "0.5146969", "0.51421416", "0.5132427", "0.51225", "0.51012844", "0.51010126", "0.5099131", "0.50921935", "0.5085102", "0.50791585", "0.5065076", "0.5050663", "0.5047774", "0.50467736", "0.50412893", "0.5039912", "0.5038952", "0.5030319", "0.50274986", "0.500612", "0.50050855", "0.5003157", "0.50024796", "0.49795097", "0.4968203", "0.4960242", "0.4941347", "0.4935197", "0.49313053", "0.49295717", "0.4925282", "0.492485", "0.49214888", "0.49187547", "0.49164483", "0.49148148", "0.49083102", "0.49029592", "0.4902518", "0.48959774", "0.4893855", "0.48931098", "0.48925936", "0.48831174", "0.4882616", "0.48801786", "0.48767793", "0.48751163", "0.48717496", "0.48689964", "0.48672983", "0.48390704", "0.48387656", "0.48381668", "0.48371324", "0.48324543", "0.4831778" ]
0.8528314
0
make a request to a subreddit and return the number of subscribers
def recurse(subreddit, hot_list=[]): headers = {'User-Agent': 'Mauricio'} url = 'http://www.reddit.com/r/' + subreddit + '/hot/.json' r = requests.get(url, headers=headers, params=parameters) if r.status_code == 200: answer_list_10 = r.json().get('data').get('children') for top in range(len(answer_list_10)): hot_list.append((answer_list_10[top].get('data').get('title'))) if len(answer_list_10) >= 100: parameters['after'] = r.json().get('data').get('after') recurse(subreddit, hot_list) return(hot_list) else: return(None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_subscribers(subreddit):\n\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n headers = {\"User-Agent\": \"my-integration/1.2.3\"}\n\n response = get(url=url, headers=headers)\n\n if response.status_code == 200:\n # print(response.json())\n\n response_json = response.json()\n data = response_json.get('data')\n subscribers = data.get(\"subscribers\")\n\n return subscribers\n\n return 0", "def number_of_subscribers(subreddit):\n url = \"https://api.reddit.com/r/{}/about\".format(subreddit)\n header = {'User-Agent': 'CustomClient/1.0'}\n request = requests.get(url, headers=header, allow_redirects=False)\n\n if request.status_code != 200:\n return 0\n jreq = request.json()\n\n if 'data' in jreq:\n return jreq.get(\"data\").get(\"subscribers\")\n else:\n return 0", "def number_of_subscribers(subreddit):\n\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n headers = {'user-agent': 'request'}\n response = requests.get(url, headers=headers, allow_redirects=False)\n if str(response) != '<Response [200]>':\n return 0\n response_json = response.json()\n subs = response_json.get('data').get('subscribers')\n return subs", "def number_of_subscribers(subreddit):\n\n url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)\n\n headers = {'User-Agent': 'My User Agent 1.0'}\n\n request = requests.get(url, headers=headers)\n req = request.json()\n\n if request.status_code == 404:\n return 0\n\n subs = req.get('data').get('subscribers')\n return subs", "def number_of_subscribers(subreddit):\n url = \"https://www.reddit.com/r/{}/about.json\"\n headers = {\n 'User-Agent': 'My User Agent 1.0',\n 'From': '[email protected]'\n }\n r_subs = requests.get(url.format(subreddit), headers=headers)\n if r_subs.status_code == 200:\n data = r_subs.json()['data']\n subscribers = data.get('subscribers')\n if subscribers is not None:\n return subscribers\n return 0", "def number_of_subscribers(subreddit):\n h = {'user-agent': 'GEEK1050'}\n link = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n req = requests.get(link, headers=h)\n\n req_data = req.json().get(\"data\").get(\"subscribers\")\n for element in req_data['children']:\n print(element['children']['title'])", "def number_of_subscribers(subreddit):\n header = {'User-Agent': 'Chrome/90.0.4430.212 Safari/537.36'}\n req = requests.get('https://www.reddit.com/r/{}/about.json'\n .format(subreddit), allow_redirects=False,\n headers=header)\n if req.status_code == 200:\n subscribers = req.json().get('data').get('subscribers')\n return subscribers\n else:\n return 0", "def number_of_subscribers(subreddit):\n header = {\"User-agent\": \"darth\"}\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n response = (requests.get(url, headers=header))\n if response.status_code != 200:\n return 0\n return response.json().get('data').get('subscribers')", "def number_of_subscribers(subreddit):\n url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)\n response = requests.get(url,\n allow_redirects=False,\n headers={'User-Agent': 'MyChromeBook'})\n if response:\n suscribers_number = response.json().get('data').get('subscribers')\n return suscribers_number\n else:\n return 0", "def number_of_subscribers(subreddit):\n URL = 'https://api.reddit.com/r/{}/about'.format(subreddit)\n header = {'User-Agent': 'Custom-User'}\n\n resp = requests.get(URL, headers=header).json()\n try:\n return resp['data']['subscribers']\n except Exception:\n return 0", "def count_subscribers(self):\n return 
self.request(\"count:Contact\", [ None ])", "def number_of_subscribers(subreddit):\n url = requests.get(\"https://www.reddit.com/r/{}/about.json\"\n .format(subreddit), headers={\"User-Agent\": \"kalkidan\"})\n if url.status_code == 200:\n return url.json().get(\"data\").get(\"subscribers\")\n else:\n return 0", "def number_of_subscribers(subreddit):\n link = 'http://www.reddit.com/r/{}/about.json'.format(subreddit)\n red = requests.get(link, headers={'User-Agent': 'tope628'}).json()\n try:\n subs = red.get('data').get('subscribers')\n except:\n return 0\n if red is None:\n return 0\n return subs", "def number_of_subscribers(subreddit):\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n header = {\"Content-Type\": \"application/json\",\n \"User-Agent\": \"Mozilla/5.0\"}\n request = requests.get(\n url,\n headers=header,\n allow_redirects=False)\n if request.status_code >= 300:\n return 0\n return json.loads(request.content.decode(\"utf-8\"))[\"data\"][\"subscribers\"]", "def number_of_subscribers(subreddit):\n r = requests.get('https://api.reddit.com/r/{}/about.json'\n .format(subreddit),\n headers={'user-agent': 'ianscustomthing'},\n allow_redirects=False)\n rj = r.json()\n if rj.get('message') == 'Not Found':\n return 0\n s = rj.get('data').get('subscribers')\n return s", "def number_of_subscribers(subreddit):\n\n import requests\n\n resInf = requests.get(\"https://www.reddit.com/r/{}/about.json\"\n .format(subreddit),\n headers={\"User-Agent\": \"My-User-Agent\"},\n allow_redirects=False)\n if resInf.status_code >= 300:\n return 0\n\n return resInf.json().get(\"data\").get(\"subscribers\")", "def number_of_subscribers(subreddit):\n import requests\n headers = {'User-Agent': 'Godfather'}\n about = requests.get(\n 'https://www.reddit.com/r/{}/about.json'.format(\n subreddit), headers=headers).json()\n try:\n subscribers = about.get('data').get('subscribers')\n if subscribers is None:\n raise TypeError\n return subscribers\n except:\n return 0", "def number_of_subscribers(subreddit):\n url = 'https://www.reddit.com/r/{}/about.json'\n headers = {'user-agent': 'X-Modhash'}\n url_format = requests.get(url.format(subreddit), headers=headers).json()\n try:\n name = url_format['data']['subscribers']\n return name\n except:\n return 0", "def number_of_subscribers(subreddit):\n url_rsubs = \"https://api.reddit.com/r/{}/about\".format(subreddit)\n headers = {'User-Agent': 'Python3'}\n response = requests.get(url_rsubs, headers=headers,\n allow_redirects=False)\n if str(response) != \"<Response [200]>\":\n return 0\n r_json = response.json()\n subs_count = r_json.get('data').get('subscribers')\n return subs_count", "def number_of_subscribers(subreddit):\n response = requests.get('https://www.reddit.com/r/{}/about.json'\n .format(subreddit),\n headers={'User-Agent': 'Camilo@holberton'},\n allow_redirects=False)\n if response.status_code == 200:\n response = response.json()\n data = response.get('data')\n subs_count = data.get('subscribers')\n if data and subs_count:\n return subs_count\n return 0", "def number_of_subscribers(subreddit):\n url = \"https://www.reddit.com/r/{}.json\".format(subreddit)\n r = requests.get(url, headers={'User-agent': 'shoji'},\n allow_redirects=False)\n data = r.json()\n if not r.status_code == 200:\n return 0\n try:\n sub = data.get(\"data\")\n children = sub.get(\"children\")\n subreddit = children[0].get(\"data\")\n subscriber_count = subreddit.get(\"subreddit_subscribers\")\n except Exception as e:\n print(\"Something went wrong\\n {}\".format(e))\n 
return 0\n\n return subscriber_count", "def number_of_subscribers(subreddit):\n if subreddit is None or type(subreddit) is not str:\n return 0\n BASE_URL = 'http://www.reddit.com/r/{}/about.json'\n head = {'User-Agent': 'Mozilla/5.0'}\n r = requests.get(BASE_URL.format(subreddit), headers=head)\n return r.json().get('data', {}).get('subscribers', 0)", "def number_of_subscribers(subreddit):\n # Set the Default URL strings\n base_url = 'https://www.reddit.com'\n api_uri = '{base}/r/{subreddit}/about.json'.format(base=base_url,\n subreddit=subreddit)\n\n # Set an User-Agent\n user_agent = {'User-Agent': 'Python/requests'}\n\n # Get the Response of the Reddit API\n res = requests.get(api_uri, headers=user_agent,\n allow_redirects=False)\n\n # Checks if the subreddit is invalid\n if res.status_code in [302, 404]:\n return 0\n\n # Returns the total subscribers of the subreddit\n return res.json().get('data').get('subscribers')", "def test_json_get_subscribers(self) -> None:\n stream_name = gather_subscriptions(self.user_profile)[0][0][\"name\"]\n stream_id = get_stream(stream_name, self.user_profile.realm).id\n expected_subscribers = gather_subscriptions(self.user_profile, include_subscribers=True)[0][\n 0\n ][\"subscribers\"]\n result = self.client_get(f\"/json/streams/{stream_id}/members\")\n result_dict = self.assert_json_success(result)\n self.assertIn(\"subscribers\", result_dict)\n self.assertIsInstance(result_dict[\"subscribers\"], list)\n subscribers: List[int] = []\n for subscriber in result_dict[\"subscribers\"]:\n self.assertIsInstance(subscriber, int)\n subscribers.append(subscriber)\n self.assertEqual(set(subscribers), set(expected_subscribers))", "def test_get_subscriptions_auth(self):\n url = reverse('xds_api:interest-list-subscriptions')\n _, token = AuthToken.objects.create(self.user_1)\n # subscribe user 1 to interest list 3\n self.list_3.subscribers.add(self.user_1)\n self.list_3.save()\n response = self.client \\\n .get(url, HTTP_AUTHORIZATION='Token {}'.format(token))\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(self.list_3.subscribers.all()),\n len(responseDict))", "def test_get_subscriptions(self):\n pass", "def test_successful_subscriptions_list_subscribers(self) -> None:\n result = self.api_get(\n self.test_user,\n \"/api/v1/users/me/subscriptions\",\n {\"include_subscribers\": \"true\"},\n )\n json = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", json)\n for stream in json[\"subscriptions\"]:\n self.assertIsInstance(stream[\"name\"], str)\n self.assertIsInstance(stream[\"color\"], str)\n self.assertIsInstance(stream[\"invite_only\"], bool)\n # check that the stream name corresponds to an actual\n # stream; will throw Stream.DoesNotExist if it doesn't\n get_stream(stream[\"name\"], self.test_realm)\n list_streams = [stream[\"name\"] for stream in json[\"subscriptions\"]]\n # also check that this matches the list of your subscriptions\n self.assertEqual(sorted(list_streams), sorted(self.streams))", "def test_subscriber(self) -> None:\n stream_name = gather_subscriptions(self.user_profile)[0][0][\"name\"]\n self.make_successful_subscriber_request(stream_name)", "def get_subscribed_users(self, obj):\n return obj.subscribed_users.count()", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('reseller', 'v1', http=http)\n\n results = service.subscriptions().list(maxResults=10).execute()\n 
subscriptions = results.get('subscriptions', [])\n if not subscriptions:\n print('No subscriptions found.')\n else:\n print('Subscriptions:')\n for subscription in subscriptions:\n print('{0} ({1}, {2})'.format(subscription['customerId'],\n subscription['skuId'], subscription['plan']['planName']))", "def _count_subscriptions(self):\n for partner in self:\n subscriptions = self.env['subscription.subscription']\n count = subscriptions.sudo().search_count([('partner_id', '=', partner.id)])\n for child in partner.child_ids:\n count += subscriptions.sudo().search_count([('partner_id', '=', child.id)])\n partner.subscriptions_count = count", "def get_subscriber_count(self, response):\n return response.css('.yt-subscriber-count')\\\n .extract_first(default='')", "def get_subscriber(self, subscriber_id):\n req_data = [ str(subscriber_id) ]\n return self.request(\"find:Contact.stats\", req_data)", "def subscribers(id):\n return core.query(schema.streamBySubscribers, id)", "def test_get_subscription(self):\n pass", "def get_freq(urn, top=50, cutoff=3):\n r = requests.get(\"https://api.nb.no/ngram/urnfreq\", json={'urn':urn, 'top':top, 'cutoff':cutoff})\n return Counter(dict(r.json()))", "def all_client_number():\n\n url = CMX_URL + '/api/location/v2/clients/count'\n header = {'content-type': 'application/json', 'accept': 'application/json'}\n response = requests.get(url, headers=header, auth=CMX_AUTH, verify=False)\n response_json = response.json()\n clients_number = response_json['count']\n return clients_number", "def get_subscriptions(self):\n url = '{}/v2/subscriptions'.format(self.url)\n r = requests.get(url, headers=self.headers_v2)\n return r.json()", "def test_interest_list_subscribe(self):\n list_id = self.list_2.pk\n url = reverse('xds_api:interest-list-subscribe', args=(list_id,))\n _, token = AuthToken.objects.create(self.user_1)\n response = self.client \\\n .patch(url, HTTP_AUTHORIZATION='Token {}'.format(token))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(self.list_2.subscribers.all()), 1)", "def invitation_received_no(request):\n if request.user.is_authenticated:\n profile_obj = CustomUser.objects.get(id__exact=request.user.id)\n qs_count = Relationship.objects.invitation_received(profile_obj).count()\n return {'invites_num': qs_count}\n return {}", "def buildSubscriptionList(self):\r\n self._clearLists()\r\n unreadById = {}\r\n\r\n if not self.userId:\r\n self.getUserInfo()\r\n\r\n unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })\r\n unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']\r\n for unread in unreadCounts:\r\n unreadById[unread['id']] = unread['count']\r\n\r\n feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })\r\n subscriptions = json.loads(feedsJson, strict=False)['subscriptions']\r\n\r\n for sub in subscriptions:\r\n categories = []\r\n if 'categories' in sub:\r\n for hCategory in sub['categories']:\r\n cId = hCategory['id']\r\n if not cId in self.categoriesById:\r\n category = Category(self, hCategory['label'], cId)\r\n self._addCategory(category)\r\n categories.append(self.categoriesById[cId])\r\n\r\n try:\r\n feed = self.getFeed(sub['id'])\r\n if not feed:\r\n raise\r\n if not feed.title:\r\n feed.title = sub['title']\r\n for category in categories:\r\n feed.addCategory(category)\r\n feed.unread = unreadById.get(sub['id'], 0)\r\n except:\r\n feed = Feed(self,\r\n sub['title'],\r\n sub['id'],\r\n sub.get('htmlUrl', None),\r\n unreadById.get(sub['id'], 
0),\r\n categories)\r\n if not categories:\r\n self.orphanFeeds.append(feed)\r\n self._addFeed(feed)\r\n\r\n specialUnreads = [id for id in unreadById\r\n if id.find('user/%s/state/com.google/' % self.userId) != -1]\r\n for type in self.specialFeeds:\r\n feed = self.specialFeeds[type]\r\n feed.unread = 0\r\n for id in specialUnreads:\r\n if id.endswith('/%s' % type):\r\n feed.unread = unreadById.get(id, 0)\r\n break\r\n\r\n return True", "def subscriptions(id='None'):\n\trows = mongo_data({}, [\"publisher_id\",\"dt_hour\", \"new_subs\"],\"subscribers\")\n\t#returns [{_id:...,field1:...,field2:...}]\n\n\n\tCOLS = [\"publisher_id\", \"dt_hour\", \"new subs\"]\n\tROWS = [[y[\"publisher_id\"],y[\"dt_hour\"],y[\"new_subs\"]] for y in rows]\n\n\tTITLE = 'SUBSCRIPTIONS'\n\n\treturn render_template(\"simple_tester_report.html\", cols=COLS, rows=ROWS, report_title=TITLE);", "def test_listuser():\n url = baseUrl + userurl + listurl\n logging.info(\"List users\")\n r = requests.get(url, headers=header)\n assert r.status_code == 200\n resp = r.json()\n global user_ids\n user_ids = []\n if resp is None:\n pass\n else:\n user_num = len(resp)\n for k in range(0, user_num):\n assert resp[k]['subscriptionIds'][0] == subscriptionid\n if resp[k][\"isActive\"] is True:\n user_ids.append(resp[k][\"id\"])\n print (user_ids)\n assert user_id in user_ids", "def get_subscribers(cls, topic, count, starting_at_callback=None):\n\t\tquery = cls.all()\n\t\tquery.filter('topic_hash =', utils.sha1_hash(topic))\n\t\tquery.filter('subscription_state = ', cls.STATE_VERIFIED)\n\t\tif starting_at_callback:\n\t\t\tquery.filter('callback_hash >=', utils.sha1_hash(starting_at_callback))\n\t\tquery.order('callback_hash')\n\n\t\treturn query.fetch(count)", "def fetchTAC(self):\n\n last_hour = datetime.datetime.now().date() - datetime.timedelta(hours = 1)\n last_hour = \"{}{}{}\".format(\"'\", last_hour, \"'\")\n last_hour = datetime.date(2011, 4, 5)\n\n self.hlr_cur.execute(\"SELECT id FROM Subscriber WHERE updated >= {date};\".format(date = last_hour))\n subscribers = self.hlr_cur.fetchall()\n\n parsed_data = {}\n unique_imei = {}\n #uid_count = 0\n\n for subscriber in subscribers:\n self.hlr_cur.execute(\"SELECT IMEI FROM Equipment WHERE id = (SELECT equipment_id FROM EquipmentWatch WHERE subscriber_id = {s_id});\".format(s_id = subscriber[0]))\n parsed_imei = self.hlr_cur.fetchall()\n\n if len(parsed_imei) > 0:\n for imei in parsed_imei:\n imei_number = imei[0] \n\n if imei_number not in unique_imei:\n unique_imei[imei_number] = subscriber[0]\n\n uid = unique_imei[imei_number]\n parsed_data.setdefault((uid), str(imei_number)[:8])\n\n self.saveRecords(parsed_data)", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def test_json_get_subscribers_for_guest_user(self) -> None:\n guest_user = self.example_user(\"polonius\")\n never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed\n\n # A guest user can only see never subscribed streams that are web-public.\n # For Polonius, the only web-public stream that he is not subscribed at\n # this point is Rome.\n self.assert_length(never_subscribed, 1)\n\n web_public_stream_id = never_subscribed[0][\"stream_id\"]\n result = self.client_get(f\"/json/streams/{web_public_stream_id}/members\")\n result_dict = self.assert_json_success(result)\n self.assertIn(\"subscribers\", result_dict)\n self.assertIsInstance(result_dict[\"subscribers\"], list)\n self.assertGreater(len(result_dict[\"subscribers\"]), 0)", "def 
get_count(username):\n return get_contributor(username)[\"count\"]", "def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e", "def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def client_count(request):\n return request.param", "def test_get_total_users_get(self):\n pass", "def test_is_ims_sub_activities(self):\r\n emaSession = ema_functions.emaLogin()\r\n session = {}\r\n session['emaSession'] = emaSession\r\n sub1 = class_ims_ema.sub('+353760000001')\r\n #test1 = sub1.subscriberCreate(session)\r\n test2 = sub1.subscriberGet(session)\r\n #test3 = sub1.subscriberDelete(session)\r\n test4 = sub1.subscriberGet(session)\r\n #self.assertTrue(test1.status_code == 200 and test2.status_code == 200 and test3.status_code == 200 and test4.status_code == 500)\r\n self.assertTrue(test2.status_code == 200 and test4.status_code == 500)", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lsntransportprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def get_total_users(request):\n number_of_users = User.objects.count()\n res = {\n 'total_users': number_of_users,\n }\n return Response(res, status=status.HTTP_200_OK)", "def get_count(type,baseurl,user,password):\n payload = {'request': 'count', 'type': type}\n r = requests.get(baseurl, params=payload, auth=HTTPBasicAuth(user, password), verify=False)\n return int(r.text)", "def message_nums(request):\n if request.user.is_authenticated:\n return {'unread_nums': request.user.usermessage_set.filter(has_read=False).count()}\n else:\n return {}", "def n_subimissions_per_day( url, headers ):", "def get_members(base_url, end_url):\n reps, content_len, bill_info, votes = get_next_page(base_url, end_url, 1)\n return reps", "def subscribers_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(\n repository_id, \"subscribers\", access_token)", "def test_getting_segment_subscribers(self):\n pass", "def get_subscribers(self, \n order=\"created_at desc\",\n offset=None,\n count=None):\n req_data = [ None, order, fmt_paging(offset, count)]\n return self.request(\"query:Contact.stats\", req_data)", "def test_search_regons(client):\n subjects, request_id = client.search_regons([\"755016841\", \"216973362\", \"862391869\"])\n\n assert len(subjects) == 3", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def test_admin_sms_subscriber_view_list(self):\n response = self.client.get('/admin/sms_module/smscampaignsubscriber/')\n self.failUnlessEqual(response.status_code, 200)", "async def get_subscriptions(\n self,\n\t\tfields: Optional[List[BaseUserGroupFields]] = None,\n\t\toffset: Optional[int] = None,\n\t\tcount: Optional[int] = None,\n\t\t**kwargs\n ) -> donut.GetSubscriptionsResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"donut.getSubscriptions\", params)\n model = donut.GetSubscriptionsResponse\n return model(**response).response", "def subscribers_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, 
repository_name, \"subscribers\", access_token)", "def subscribed(cls, team):\n return cls.query(\n cls.status == 'subscribe',\n cls.team == team.lower()\n ).fetch(100)", "def test_process_subscriptions(self):\n pass", "def test_import_subscribers(dbsession, registry, mailgun, populated_mailing_list):\n paginator = mailgun.list_members(populated_mailing_list)\n items = paginator[\"items\"]\n assert len(items) == 1\n assert items[0][\"address\"] == \"[email protected]\"", "def get_subscribers(self) -> Iterator[Any]:\n for subscription in self._subscriptions[self.id]:\n yield subscription.subscriber", "def countAndGetCallInvoice(self,id,start,finish):\n self.calls = 0\n return self.getNumOfInvoice(id,start,finish)", "def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')", "def combined_inbox_count(request):\r\n count = 0\r\n for func in inbox_count_sources():\r\n counts = func(request)\r\n if counts:\r\n for value in counts.itervalues():\r\n try:\r\n count = count + int(value)\r\n except (TypeError, ValueError):\r\n pass\r\n return {'combined_inbox_count': count,}", "def subscribers(self) -> pulumi.Output[Sequence['outputs.BudgetActionSubscriber']]:\n return pulumi.get(self, \"subscribers\")", "def GET(self):\n web.header(\"Content-Type\",\"application/json; charset=utf-8\")\n data = web.input(module=\"module\")\n module = data[\"module\"]\n count = db_module.get_module_newsNum(module)\n result = json.dumps({\"count\": count})\n return result", "def test_successful_subscriptions_list(self) -> None:\n result = self.api_get(self.test_user, \"/api/v1/users/me/subscriptions\")\n json = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", json)\n for stream in json[\"subscriptions\"]:\n self.assertIsInstance(stream[\"name\"], str)\n self.assertIsInstance(stream[\"color\"], str)\n self.assertIsInstance(stream[\"invite_only\"], bool)\n # check that the stream name corresponds to an actual\n # stream; will throw Stream.DoesNotExist if it doesn't\n get_stream(stream[\"name\"], self.test_realm)\n list_streams = [stream[\"name\"] for stream in json[\"subscriptions\"]]\n # also check that this matches the list of your subscriptions\n self.assertEqual(sorted(list_streams), sorted(self.streams))", "def do_GET(self):\n # Check that basic auth is used.\n authorization = self.headers.get(\"Authorization\")\n if authorization == \"\" or authorization is None:\n self.send_response(400)\n self.end_headers()\n return\n\n if \"/subscriptions\" in self.path:\n self.send_response(200)\n self.end_headers()\n self.wfile.write(json.dumps(sample_client_subscription).encode())\n else:\n self.send_response(200)\n self.end_headers()\n self.wfile.write(json.dumps(sample_client_list_response).encode())", "def _get_notifications(self):\r\n student = self._student('GET')\r\n if student is None:\r\n self._error_response()\r\n\r\n else:\r\n self._success_response({\r\n 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],\r\n 'count_required': student.num_required,\r\n 'count_graded': student.num_graded,\r\n 'count_available': student.num_pending\r\n })", "def count(self, sub, start=0, end=None):\n return count(self, sub, start, end)", "def on_subscribe(self, request, ssn):\n if 'hub.challenge' not in request.GET:\n logger.error(f'Missing hub.challenge in subscription verification {ssn.pk}!')\n tasks.save.delay(\n pk = ssn.pk,\n subscribe_status = 'verifyerror',\n 
verifyerror_count = ssn.verifyerror_count + 1\n )\n return Response('Missing hub.challenge', status=HTTP_400_BAD_REQUEST)\n\n if not request.GET.get('hub.lease_seconds', '').isdigit():\n logger.error(f'Missing integer hub.lease_seconds in subscription verification {ssn.pk}!')\n tasks.save.delay(\n pk = ssn.pk,\n subscribe_status = 'verifyerror',\n verifyerror_count = ssn.verifyerror_count + 1\n )\n return Response('hub.lease_seconds required and must be integer', status=HTTP_400_BAD_REQUEST)\n\n if ssn.unsubscribe_status is not None:\n logger.error(f'Subscription {ssn.pk} received subscription verification request,'\n f' but its was explicitly unsubscribed before.')\n return Response('Unsubscribed')\n\n tasks.save.delay(\n pk = ssn.pk,\n subscribe_status = 'verified',\n lease_expiration_time = now() + timedelta(seconds=int(request.GET['hub.lease_seconds'])),\n connerror_count = 0,\n huberror_count = 0,\n verifyerror_count = 0,\n verifytimeout_count = 0\n )\n logger.info(f'Got {ssn.pk} subscribe confirmation from hub.')\n return HttpResponse(request.GET['hub.challenge'])", "def update_db_and_get_subs(mail, login_info):\n subscribers = check_for_subscribers(mail, login_info)\n\n db_subs = []\n if 'subscribers' in db.keys():\n db_subs = db['subscribers']\n\n for sub in subscribers:\n if sub not in db_subs:\n print(f'New subscriber added: {sub}')\n db_subs.append(sub)\n subject = \"Welcome to F1 Digest!\"\n send_email(subject, subscribe_body, sub, login_info)\n\n db['subscribers'] = db_subs\n\n return db_subs", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def return_count(self, number, request, environ, start_response,\n response_headers):\n response_type = self.content_negotiation(\n request, environ, self.DereferenceTypes)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n '$count requires plain text or octet-stream formats', 406)\n data = str(number).encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (200, \"Success\"), response_headers)\n return [data]", "def listSubscriptions() -> object:\n\n db = Db()\n return db.Subscriptions.objects().to_json()", "def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})", "def check_for_subscribers(mail, login_info):\n ADDRESS, PASSWORD = login_info\n\n try:\n mail.select('inbox')\n 
data = mail.search(None, 'ALL') \n except:\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\n mail.login(ADDRESS, PASSWORD)\n mail.select('inbox')\n data = mail.search(None, 'ALL')\n \n mail_ids = data[1]\n id_list = mail_ids[0].split() \n\n if not id_list:\n return []\n\n first_email_id = int(id_list[0])\n latest_email_id = int(id_list[-1])\n\n subscribers = []\n\n for i in range(latest_email_id, first_email_id-1, -1):\n data = mail.fetch(str(i), '(RFC822)')\n for response_part in data:\n arr = response_part[0]\n if isinstance(arr, tuple):\n msg = email.message_from_string(str(arr[1],'utf-8'))\n email_from = msg['from']\n subscribers.append(email_from)\n\n return subscribers", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def get(self, orgname):\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n organization = model.organization.get_organization(orgname)\n query = model.organization_skus.get_org_subscriptions(organization.id)\n\n if query:\n subscriptions = list(query.dicts())\n for subscription in subscriptions:\n subscription[\"sku\"] = marketplace_subscriptions.get_subscription_sku(\n subscription[\"subscription_id\"]\n )\n return subscriptions\n else:\n return []\n abort(401)", "def subscribers(self) -> pulumi.Input[Sequence[pulumi.Input['BudgetActionSubscriberArgs']]]:\n return pulumi.get(self, \"subscribers\")", "def task2(self, doc) -> dict:\n country_count = {}\n match_records = []\n for entry in self.records:\n if (entry['event_type'] =='read'):\n if entry['subject_doc_id'] == doc:\n match_records.append(entry)\n for rec in match_records:\n if (rec['visitor_country'] in country_count):\n country_count[rec['visitor_country']] += 1\n else:\n country_count[rec['visitor_country']] = 1\n print(country_count)\n return country_count", "def make_requests_with_n_recipients(num, url):\n recipients = [str(x) for x in xrange(num)]\n\n data = create_request('TEST MESSAGE!', recipients)\n return call_api_endpoint(url, data)", "def count(self, index):\n if isinstance(index, list):\n index = ','.join(index)\n req = requests.get(\n urljoin(self.base_url, '{0}/_count'.format(index)),\n verify=self.verify_certs)\n return req.json()['count']", "def test_subscribe(mocker, api: API, account: Account, order, sku_id, activation):\n api.regnum.order.return_value = order\n api.activation.activate.return_value = activation\n assert account.subscribe(sku_id) == activation[\"id\"]\n api.regnum.order.assert_called_with(\n \"USERNAME\", sku_id, 1, date.today(), timedelta(days=365)\n )\n api.activation.activate.assert_called_with(\"USERNAME\", 5678, mocker.ANY, date.today())\n assert order in account.orders\n assert activation in account.activations", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def test_search_accounts(client):\n subjects, request_id = client.search_accounts(\n [\n \"70506405335016096312945164\",\n \"20028681823250598006154766\",\n \"31872831997646186715413833\",\n ]\n )\n\n assert len(subjects) == 3", "def get_user_count_registered_against_business_and_survey(business_id: str, survey_id: str, is_transfer) -> int:\n logger.info(\"Attempting to get user count\", business_ids=business_id, survey_id=survey_id)\n url = 
f'{app.config[\"PARTY_URL\"]}/party-api/v1/pending-survey-users-count'\n data = {\"business_id\": business_id, \"survey_id\": survey_id, \"is_transfer\": is_transfer}\n response = requests.get(url, params=data, auth=app.config[\"BASIC_AUTH\"])\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n raise ApiError(logger, response)\n return response.json()", "def get_num_of_sales_per_customer_ids():\n\n # your code" ]
[ "0.76823676", "0.7451481", "0.7448277", "0.73799473", "0.72858876", "0.7222261", "0.71943074", "0.7162595", "0.7158168", "0.7157379", "0.71367633", "0.71244204", "0.71223074", "0.71045095", "0.7061548", "0.70038885", "0.6953015", "0.6951698", "0.6925322", "0.69033", "0.68208605", "0.6781992", "0.6664515", "0.6480759", "0.63943744", "0.6179491", "0.5926989", "0.5910972", "0.584116", "0.5834524", "0.5819679", "0.57960165", "0.57633096", "0.57536143", "0.5733111", "0.5712486", "0.5692815", "0.5686368", "0.56734544", "0.5670715", "0.56250316", "0.5598913", "0.5594021", "0.5578406", "0.5570526", "0.55686647", "0.55599535", "0.5537437", "0.5516226", "0.55025864", "0.5496542", "0.54861593", "0.54860896", "0.5475061", "0.5442715", "0.5441692", "0.5429206", "0.5424884", "0.54176426", "0.5417051", "0.5409801", "0.54016", "0.5401264", "0.5386065", "0.5384175", "0.53769654", "0.5362586", "0.5356682", "0.5355129", "0.5348996", "0.5344721", "0.5340788", "0.5339492", "0.53357875", "0.5331334", "0.5328251", "0.53182834", "0.5311812", "0.5309014", "0.5301066", "0.5296513", "0.52914953", "0.5291231", "0.52804106", "0.52612185", "0.52530265", "0.52508956", "0.5249992", "0.52470446", "0.5243257", "0.52417016", "0.5240664", "0.523488", "0.523282", "0.52315754", "0.5230689", "0.5230689", "0.5230689", "0.52213705", "0.5215978", "0.5214212" ]
0.0
-1
A cumulative metric is one that needs a prior value to calculate the next value, i.e., only the deltas of the currently observed values are reported. A noncumulative metric is one where the absolute observed value is reported every time.
def is_cumulative(self): return self.name in ( "app.cpu", "app.uptime", "app.disk.bytes", "app.disk.requests", "app.mem.majflt", "app.io.wait", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cumulative_returns(self):\n return (1 + self.pct_change).cumprod()", "def get_cumulative_model(self):\n cm = None\n for ac in self.ac:\n if ac[0] is None:\n continue\n m = self.get_model(last=False, a=ac[0], c=ac[1])\n cm = m if cm is None else cm+m\n return cm", "def cumulative_distribution(self, X):\n raise NotImplementedError", "def cummulative_return(df):\n cumm_daily_return=(df[-1]/df[0])-1\n \n return cumm_daily_return", "def cumulate(self):\n name = self.getName() + '_Cumulated'\n c_names = self.getColumnNames()\n # create the new table an add the first column to it\n t_cumulate = DataTable(name)\n time = self.getColumn(0)\n t_cumulate.addColumn(c_names[0],time)\n # for each other column, cumulate values \n for i in range(1,self.getNbColumns()):\n c = self.getColumn(i)\n N = [0]\n for j in range(1,len(c)):\n n = (c[j]+c[j-1])/2*(time[j]-time[j-1]) + N[-1]\n N.append(n)\n pass\n t_cumulate.addColumn(c_names[i],N)\n pass\n \n return t_cumulate", "def cumulate(self, value):\n last = 0 if len(self) == 0 else self[-1]\n if value is None:\n self.append(last)\n else:\n self.append(last + value)", "def calculate_cumulative_reward(self, gamma: float = 1.0):\n num_transitions = len(self)\n assert num_transitions > 0, \"called on empty trajectory\"\n rewards = self.reward\n discounts = [gamma**i for i in range(num_transitions)]\n return sum(reward * discount for reward, discount in zip(rewards, discounts))", "def cumulate(tobject):\n # forward suffix\n return asrootpy(tobject.GetCumulative(True, uuid.uuid4().get_hex()))", "def from_cumulative(self, cumulative):\n return self._dist_cls.from_cumulative(self._edges, cumulative)", "def get_bprop_cumsum(self):\n cumsum = P.CumSum(exclusive=self.exclusive, reverse=not self.reverse)\n\n def bprop(x, axis, out, dout):\n return cumsum(dout, axis), zeros_like(axis)\n return bprop", "def cumprob(self):\r\n return self.probabilities.cumsum(-1)", "def show_cumulative_mig(model):\r\n return sum(cumulative_mig_list)", "def calculate_cumulative_returns(returns, starting_value=0, out=None):\n if len(returns) < 1:\n return returns.copy()\n\n nanmask = np.isnan(returns)\n if np.any(nanmask):\n returns = returns.copy()\n returns[nanmask] = 0\n\n allocated_output = out is None\n if allocated_output:\n out = np.empty_like(returns)\n\n np.add(returns, 1, out=out)\n out.cumprod(axis=0, out=out)\n\n if starting_value == 0:\n np.subtract(out, 1, out=out)\n else:\n np.multiply(out, starting_value, out=out)\n\n if allocated_output:\n if returns.ndim == 1 and isinstance(returns, pd.Series):\n out = pd.Series(out, index=returns.index)\n elif isinstance(returns, pd.DataFrame):\n out = pd.DataFrame(\n out, index=returns.index, columns=returns.columns,\n )\n\n return out", "def get_cumulative_rewards(rewards, # rewards at each step\n gamma=0.99 # discount for reward\n ):\n\n cumulative_rewards = np.empty_like(rewards)\n cumulative_rewards = cumulative_rewards.astype(float)\n cumulative_rewards[-1] = rewards[-1]\n\n for index in range(len(rewards) - 2, -1, -1):\n discount = cumulative_rewards[index + 1] * gamma\n reward = rewards[index]\n cumulative_rewards[index] = discount + reward\n\n return cumulative_rewards # <array of cumulative rewards>", "def cum_sum(self):\n\n # create cdo command and runit\n cdo_command = \"cdo -timcumsum\"\n run_this(cdo_command, self, output=\"ensemble\")", "def _cumulative_diff(item, key):\n global _prev_values\n if key in _prev_values:\n # Subsequent item, return difference from previous\n prev = _prev_values[key]\n _prev_values[key] = 
item\n\n result = copy.deepcopy(item)\n for fname in result[\"fields\"]:\n result[\"fields\"][fname] -= prev[\"fields\"][fname]\n\n return result\n else:\n # First item, no difference, return zeros.\n _prev_values[key] = item\n result = copy.deepcopy(item)\n fields = result[\"fields\"]\n for fname in fields:\n fields[fname] = 0\n return result", "def get_cumulative_distribution(self):\n srt_dists = np.sort(self._distances)\n tol = 1E-3\n for i in range(1, len(srt_dists)):\n while srt_dists[i] - srt_dists[i-1] < tol:\n srt_dists[i] += tol\n return {\"x\": srt_dists-srt_dists[0], \n \"P\": np.linspace(0.0, 1.0, len(self._distances), endpoint=False)}", "def __get_cumulative (self, event_df, inputon, inputoff, num_bins = 10):\r\n \r\n input_mark = lambda x, inputon, inputoff: 1 if x == inputon else -1 if x == inputoff else 0\r\n \r\n if len(event_df) == 0:\r\n pass\r\n else:\r\n df = event_df.copy()\r\n df[inputon] = df['Events'].apply(lambda x: input_mark(x, inputon, inputoff))\r\n start_cum = df[df['Events']== inputon][['Time',inputon]] #defaults is input state\r\n start_cum[self.subject+'_cum'] = start_cum[inputon].cumsum()\r\n #start_cum[self.subject+'_cumP'] = 100*start_cum[self.subject+'_cum']/start_cum[inputon].sum()\r\n start_cum.reset_index(inplace=True)\r\n start_cum.drop(columns = inputon,inplace = True)\r\n start_cum.drop(columns = 'index',inplace = True)\r\n return start_cum", "def cumsum(self):\n return self._lift(lambda c: c.cumsum)", "def get_cum_returns(self,df, init_inv = 1):\n return ((1 + df).cumprod()) * init_inv", "def get_cumulative_rewards(rewards, # rewards at each step\r\n gamma=0.99 # discount for reward\r\n ):\r\n cumulative_rewards = []\r\n prev = 0\r\n\r\n for r in reversed(rewards):\r\n prev = r + gamma * prev\r\n cumulative_rewards.append(prev)\r\n cumulative_rewards.reverse()\r\n return cumulative_rewards", "def cumulative_left (self):\n # TODO: include proper errors\n return Line (self.bins,\n self.values[::-1].cumsum ()[::-1])\n #return Line (self.bins, self.sum - self.values.cumsum ())", "def cumulative_correlation(self) -> int:\n return self._cumulative_correlation", "def cumulative_distribution(self, dist='current'):\n \n dictDist = {'current': np.cumsum(self.current),\n 'prior': np.cumsum(self.prior),\n 'posterior': np.cumsum(self.posterior, axis=1)\n }\n \n cdf = dictDist[dist]\n \n return cdf", "def cumulative_distribution(self):\n\n cum_dd = []\n sum_p = 0\n for k, p in reversed(self.dd):\n sum_p += p\n cum_dd.append((k, sum_p))\n return list(reversed(cum_dd))", "def test_kde_cumulative(limits):\n data = np.random.normal(0, 1, 1000)\n density = _kde(data, custom_lims=limits, cumulative=True)[1]\n np.testing.assert_almost_equal(round(density[-1], 3), 1)", "def office_calculate_cumulative_misfit(parser, args, params):\n local_args = parser.parse_known_args(args)\n control.calculate_cumulative_misfit(params)", "def calculate_cumulative_returns_by_group(quantile_ret_ts: pd.DataFrame):\n # 1_period_return 5_period_return 10_period_return\n # factor_quantile Date\n # 1 2010-06-17 0.002230 0.021172 -0.014775\n # 2010-06-18 0.036203 0.017436 -0.016843\n # 2010-06-21 -0.004873 -0.017346 -0.035416\n # 2010-06-22 -0.000315 -0.036443 -0.046313\n # 2010-06-23 -0.010813 -0.039430 -0.039475\n # ... ... ... 
...\n quantile_ret_ts_ = quantile_ret_ts.copy().add(1) # type: pd.DataFrame\n # todo period larger than 1 is not right cumulative return\n cumulative_ret_by_group = quantile_ret_ts_.groupby(level=0).cumprod()\n cumulative_ret_by_group.dropna(inplace=True)\n cumulative_ret_by_group.sort_index(level=1, inplace=True)\n return cumulative_ret_by_group", "def getCummulativeValues(self):\n self.cumulativePhaseHeightInRing1 = np.cumsum(self.phaseHeightInRing1)\n self.cumulativePhaseHeightInRing2 = np.cumsum(self.phaseHeightInRing2)\n self.cumulativeLeftCriticalPointsRing1 = np.cumsum(self.leftCriticalPointsRing1)\n self.cumulativeRightCriticalPointsRing1 = np.cumsum(self.rightCriticalPointsRing1)\n self.cumulativeLeftCriticalPointsRing2 = np.cumsum(self.leftCriticalPointsRing2)\n self.cumulativeRightCriticalPointsRing2 = np.cumsum(self.rightCriticalPointsRing2)\n\n if(self.init1 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing1):\n self.cumulativeLeftCriticalPointsRing1[index] = value + self.init1\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing1):\n self.cumulativeRightCriticalPointsRing1[index] = value + self.init1\n\n if(self.init2 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing2):\n self.cumulativeLeftCriticalPointsRing2[index] = value + self.init2\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing2):\n self.cumulativeRightCriticalPointsRing2[index] = value + self.init2\n\n self.cumulativePhaseHeightInRing1 = np.insert(self.cumulativePhaseHeightInRing1, 0, 0.0)\n self.cumulativePhaseHeightInRing2 = np.insert(self.cumulativePhaseHeightInRing2, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing1 = np.insert(self.cumulativeLeftCriticalPointsRing1, 0, 0.0)\n self.cumulativeRightCriticalPointsRing1 = np.insert(self.cumulativeRightCriticalPointsRing1, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing2 = np.insert(self.cumulativeLeftCriticalPointsRing2, 0, 0.0)\n self.cumulativeRightCriticalPointsRing2 = np.insert(self.cumulativeRightCriticalPointsRing2, 0, 0.0)", "def _reset_absolute_metrics(self):\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not _metric.is_cumulative:\n self.__aggregated_metrics[_metric] = 0", "def get_cumgain(df, outcome_col='y', treatment_col='w', treatment_effect_col='tau',\n steps=100, normalize=False, random_seed=42):\n\n cumlift = get_cumlift(df, outcome_col, treatment_col, treatment_effect_col, steps, random_seed)\n\n # cumulative gain = cumulative lift x (# of population)\n cumgain = cumlift.mul(cumlift.index.values, axis=0) / 100 * df.shape[0]\n\n if normalize:\n cumgain = cumgain.div(cumgain.iloc[-1, :], axis=1)\n\n return cumgain", "def test_cumulative_distribution_fit_call_pd(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.to_numpy())\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def cum_returns(returns, starting_value=None):\n\n # df_price.pct_change() adds a nan in first position, we can use\n # that to have cum_returns start at the origin so that\n # df_cum.iloc[0] == starting_value\n # Note that we can't add that ourselves as we don't know which dt\n # to use.\n if pd.isnull(returns.iloc[0]):\n returns.iloc[0] = 0.\n\n df_cum = np.exp(np.log(1 + returns).cumsum())\n\n if starting_value is 
None:\n return df_cum - 1\n else:\n return df_cum * starting_value", "def cumulative_sum(self, lis):\n new_list = []\n for i in range(len(lis)):\n if i == 0:\n new_list.append(lis[i])\n else:\n new_list.append(new_list[i-1] + lis[i])\n return new_list", "def cdf(self,x):\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == sortedMapping[-1][0]:\n return 1.0\n if x in self.values:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x == ( float(element[0]) if self.isFloat else element[0] ):\n return cumulative\n else:\n if self.isFloat:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x >= element[0]:\n return cumulative\n # if we reach this point we must error out\n self.raiseAnError(IOError,'Categorical distribution cannot calculate cdf for ' + str(x))", "def cumulative_probability_distribution(self):\n return list(accumulate(self.probability_distribution()))", "def _cumulative_sum(xs):\r\n cumsum = 0\r\n for x in xs:\r\n cumsum += x\r\n yield cumsum", "def _cumsum(self) -> np.ndarray:\n\n if not hasattr(self, \"__cumsum\"):\n self.__cumsum = np.insert(np.cumsum(self.sizes), 0, 0)\n return self.__cumsum", "def calculate_delta(self, name, previous, count):\n if count < previous:\n logger.error(\n \"Saw a non-monotonically increasing value for \"\n \"metric {name}\".format(name=name))\n return 0\n return count - previous", "def processed_cum_overall(self):\n self.processed_cum_overall = (\n self.cumulative_stats_for_team_each_year\n [['Season','TeamID','win_rate','total_score','total_opponent_score','fgp','fg3p','ftp', 'total_rebounds','total_off_rebounds','total_def_rebounds',\n 'total_off_rebounds_percent','total_def_rebounds_percent','total_rebound_possession_percent','total_rebound_possessiongain_percent','total_blocks',\n 'total_assists','total_steals','total_turnover','total_personalfoul','total_block_opp_FGA_percent','total_assist_per_fgm','total_assist_turnover_ratio',\n 'expectation_per_game','avg_lose_score_by','avg_win_score_by']]\n )", "def _discounted_cumsum(self, rewards, rate=None):\n # HINT1: note that each entry of the output should now be unique,\n # because the summation happens over [t, T] instead of [0, T]\n # HINT2: it is possible to write a vectorized solution, but a solution\n # using a for loop is also fine\n rate = self.gamma if rate is None else rate\n\n rewards = np.array(rewards)\n disounted_return = list(\n accumulate(rewards[::-1], lambda ret, rew: rate * ret + rew))\n disounted_return = np.array(disounted_return)[::-1]\n return disounted_return", "def cdf(self, value):\n cdf = torch.where(\n value < 1., \n self.base.cdf(value), \n torch.ones_like(value) # all of the mass\n )\n cdf = torch.where(value < 0., torch.zeros_like(cdf), cdf)\n return cdf", "def cum_distance(self, cum_distance):\n\n self._cum_distance = cum_distance", "def cumulative_right (self):\n # TODO: include proper errors\n return Line (self.bins, self.values.cumsum ())", "def _additive_estimate(events, timeline, _additive_f, _additive_var, reverse):\n if reverse:\n events = events.sort_index(ascending=False)\n population = events['entrance'].sum() - events['removed'].cumsum().shift(1).fillna(0)\n deaths = events['observed'].shift(1).fillna(0)\n estimate_ = np.cumsum(_additive_f(population, deaths)).ffill().sort_index()\n var_ = np.cumsum(_additive_var(population, deaths)).ffill().sort_index()\n else:\n deaths = events['observed']\n population = events['entrance'].cumsum() - 
events['removed'].cumsum().shift(1).fillna(0) #slowest line here.\n estimate_ = np.cumsum(_additive_f(population, deaths))\n var_ = np.cumsum(_additive_var(population, deaths))\n\n estimate_ = estimate_.reindex(timeline, method='pad').fillna(0)\n var_ = var_.reindex(timeline, method='pad')\n var_.index.name = 'timeline'\n estimate_.index.name = 'timeline'\n\n return estimate_, var_", "def value(self):\n current_value = self.initial_value * self.schedule(self.step / self.nvalues)\n self.step += 1.\n return current_value", "def cumulative_capacity_rule(_m, g, y):\r\n\r\n return sum(m.x_c[g, j] for j in m.Y if j <= y)", "def exclusive_cumsum(x):\n return torch.cumsum(torch.cat([x.new_zeros(x.size(0), x.size(1), x.size(2), 1), x[:, :, :, :-1]], dim=-1), dim=-1)", "def _add_if_observed(Idx, cumsum):\n \n with tf.name_scope(\"add_if_observed\"):\n cumsum = tf.cond(tf.equal(self.O[Idx], 1), \n lambda: _add_to_cumSum(Idx, cumsum),\n lambda: tf.cast(cumsum, tf.float32)) \n \n Idx = tf.cast(tf.add(Idx, 1), tf.int32)\n \n return Idx, cumsum", "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumulative(requestContext, seriesList, consolidationFunc='sum'):\n return consolidateBy(requestContext, seriesList, 'sum')", "def setEnableCumulative(self, value):\n return self._set(enableCumulative=value)", "def get_cumulative_reward_spec(self) -> Tuple[Optional[float], Optional[float]]:\n if self == RewardType.EVERY_STEP_ZERO_SUM:\n return 0., 1.\n elif self == RewardType.EVERY_STEP_LENGTH:\n return 0., 1.\n elif self == RewardType.ON_EAT_AND_ON_DEATH:\n return -1., 1.\n elif self == RewardType.RANK_ON_DEATH:\n return -1., 1.\n else:\n raise ValueError(f'RewardType not yet implemented: {self.name}')", "def cumsum(x, axis=None):\r\n return CumsumOp(axis=axis)(x)", "def accumulate(self, value):\n inc_counter_op = smart_assign(self._counter, 1.0, assign_fn=tf.assign_add)\n acc_op = smart_assign(self._acc_var, value, assign_fn=tf.assign_add)\n return tf.group(inc_counter_op, acc_op)", "def augmented_value(self, c):\n\t\t#test to make sure this division isnt just 0\n\t\treturn self.value + c/self.visited", "def EvaluateCumulativeGaussian(self, *args):\n return _ITKCostFunctionsPython.itkCumulativeGaussianCostFunction_EvaluateCumulativeGaussian(self, *args)", "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n 
keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumsum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumsum,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def accumulate(values):\n # TODO: replace usage with numpy.cumsum(values) after adding numpy\n accumulation = 0\n for value in values:\n accumulation += value\n yield accumulation", "def update(self, current, values=None, finalize=None):\n if finalize is None:\n if self.target is None:\n finalize = False\n else:\n finalize = current >= self.target\n\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n # In the case that progress bar doesn't have a target value in the first\n # epoch, both on_batch_end and on_epoch_end will be called, which will\n # cause 'current' and 'self._seen_so_far' to have the same value. Force\n # the minimal value to 1 here, otherwise stateful_metric will be 0s.\n value_base = max(current - self._seen_so_far, 1)\n if k not in self._values:\n self._values[k] = [v * value_base, value_base]\n else:\n self._values[k][0] += v * value_base\n self._values[k][1] += value_base\n else:\n # Stateful metrics output a numeric value. This representation\n # means \"take an average from a single value\" but keeps the\n # numeric formatting.\n self._values[k] = [v, 1]\n self._seen_so_far = current\n\n now = time.time()\n info = ' - %.0fs' % (now - self._start)\n if self.verbose == 1:\n if now - self._last_update < self.interval and not finalize:\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write('\\b' * prev_total_width)\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n\n if self.target is not None:\n numdigits = int(np.log10(self.target)) + 1\n bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += ('=' * (prog_width - 1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.' 
* (self.width - prog_width))\n bar += ']'\n else:\n bar = '%7d/Unknown' % current\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n\n if self.target is None or finalize:\n if time_per_unit >= 1 or time_per_unit == 0:\n info += ' %.0fs/%s' % (time_per_unit, self.unit_name)\n elif time_per_unit >= 1e-3:\n info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)\n else:\n info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)\n else:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = '%d:%02d:%02d' % (eta // 3600,\n (eta % 3600) // 60, eta % 60)\n elif eta > 60:\n eta_format = '%d:%02d' % (eta // 60, eta % 60)\n else:\n eta_format = '%ds' % eta\n\n info = ' - ETA: %s' % eta_format\n\n for k in self._values_order:\n info += ' - %s:' % k\n if isinstance(self._values[k], list):\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n else:\n info += ' %s' % self._values[k]\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += (' ' * (prev_total_width - self._total_width))\n\n if finalize:\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if finalize:\n numdigits = int(np.log10(self.target)) + 1\n count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)\n info = count + info\n for k in self._values_order:\n info += ' - %s:' % k\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if avg > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now", "def cumulativeMovingAverage(self, x: list) -> list:\n N = len(x)\n C = [0] * (N + 1)\n \n for n in range(len(x)):\n C[n + 1] = C[n] + (x[n] - C[n])/(n + 1)\n \n C.pop(0) # remove leading 0\n \n return C", "def drawdown(returns):\n val = returns.cumsum()\n running_max = val.expanding().max()\n drawdown_series = val - running_max\n return drawdown_series", "def cumulative_energy_vertical(img):\n e = energy_map(img) # Total energy \n M = np.zeros((e.shape[0], e.shape[1]), dtype=type(e)) #To store cumulative minimum energy\n row,col = e.shape\n M[0] = e[0] #First row is same as energy_map first row\n for i in range(1,row):\n for j in range(0,col):\n if j == 0:\n M[i,j] = e[i,j] + min(M[i-1,j],M[i-1,j+1])\n elif j == col-1:\n M[i,j] = e[i,j] + min(M[i-1,j-1],M[i-1,j])\n else:\n M[i,j] = e[i,j] + min(M[i-1,j-1],M[i-1,j],M[i-1,j+1]) \n return M", "def estimate(values, target):\n\n # next time\n # diff(values)\n\n\n return 1.", "def reward_mean_100_running(self, cumreward, episode):\r\n if episode == 0:\r\n cumreward_mean100 = cumreward[0]\r\n elif episode <= 100 and episode > 0:\r\n cumreward_mean100 = np.mean(cumreward[:episode+1])\r\n else:\r\n cumreward_mean100 = np.mean(cumreward[episode - 100:episode])\r\n return cumreward_mean100", "def test_cumulative_distribution_fit_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.to_numpy())\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def cummax(self):\n return self._lift(lambda c: c.cummax)", "def make_cumulative_distr_plot(data):\n x = data.index\n y = data[\"cumprop\"]\n plot = 
go.Bar(x=x, y=y, showlegend=False)\n\n return plot", "def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]", "def test_cumulative_distribution_fit_df_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def cumulative_sum(some_list):\n #This function will return new list that every element is the sum of the element before.\n for i in range(len(some_list)):\n new_list = some_list\n if i > 0: #if it's not the first element in list so it can be sum by element before.\n new_list[i] += new_list[i-1]\n else:\n pass #don't do anything with the first element of the list.\n return new_list", "def get_cumulative(self, search_items, csv=False, output_dir=None, extra_param=None):\n\n # Get data from api and create objects\n api_datas = self.call_api(search_items, \"probability\", \"cumulative\", \"property\", extra_param=extra_param)\n product = [ProbabilityCumulative(api_data) for api_data in api_datas]\n\n if csv:\n csv_format.to_csv(product, \"probability\", \"cumulative\", output_dir=output_dir)\n\n logging.info(\"Probability Cumulative Data Ready.\")\n\n return product", "def to_cumret(self, div_by_first: bool = True, logret: bool = False):\n if not any([True if x == 'Return(Total)' else False for x in self.tsdf.columns.get_level_values(1).values]):\n self.value_to_ret(logret=logret)\n self.tsdf = self.tsdf.add(1.0)\n self.tsdf = self.tsdf.cumprod(axis=0)\n if div_by_first:\n self.tsdf = self.tsdf / self.tsdf.iloc[0]\n self.valuetype = 'Price(Close)'\n self.tsdf.columns = pd.MultiIndex.from_product([[self.label], [self.valuetype]])\n return self", "def __calc_cum_mean_reward(self, token_to_move, next_states):\r\n min_val = -1\r\n max_val = 99\r\n\r\n next_state_sum = np.mean(np.sum(next_states[token_to_move][0]))\r\n \r\n # Get the opponent who is most ahead by finding sum of the state\r\n opponents_next_state_sum = np.sum(next_states[token_to_move][1:])\r\n opponents_mean = np.mean(np.mean(opponents_next_state_sum))\r\n\r\n diff_state_sum = next_state_sum - opponents_mean\r\n\r\n return (diff_state_sum - min_val)/(max_val - min_val)", "def _diff_cdf_at_time_i(pmf: Tensor, y: Tensor) -> Tensor:\n n = pmf.shape[0]\n ones = torch.ones((n, 1), device=pmf.device)\n r = pmf.cumsum(1).matmul(y.transpose(0, 1))\n diag_r = r.diag().view(1, -1)\n r = ones.matmul(diag_r) - r\n return r.transpose(0, 1)", "def compute_cumulants(self, force=False):\n 
self._cumulant_computer.compute_cumulants(verbose=self.verbose,\n force=force)", "def cumulative_sum(array):\n res = []\n val = 0\n for elem in array:\n val += elem\n res.append(val)\n return res", "def cumsumr(x, axis=0):\n cums = x.cumsum(axis=axis)\n return cums - np.maximum.accumulate(cums*(x == 0), axis=axis)", "def cum_dist_func(self, chi_square_stat):\n # use 1 degree of freedom given df = (R-1) * (C-1); df == (2-1) * (2-1) == 1\n p_value = 1.0 - float(stats.chi2.cdf(chi_square_stat, 1))\n return p_value", "def reverse_CDF(self):\n self.series_y = 1. - self.series_y\n self.quantile_series_y_lower = 1. - self.quantile_series_y_lower\n self.quantile_series_y_upper = 1. - self.quantile_series_y_upper", "def cummulativeCompare(values, pos, diff, offset) :\n\tnewvalues, newpos = hopMax(diff, offset)\n\tvalues, pos = mergeHopMax(values, pos, newvalues, newpos)\n\treturn values, pos", "def cumulative_gamma(self, state: EnvState, params: EnvParams) -> float:\n return params.gamma**state.step", "def compute_cdf(ordered_weights):\n return numpy.cumsum(ordered_weights) - 0.5 * ordered_weights", "def cumulative_cm(locations, recombination_data):\n np_locations = np.array(locations, dtype = np.uint32, copy = False)\n base_ends = recombination_data.bases\n assert np.all(np_locations <= base_ends[-1])\n end_index = np.searchsorted(base_ends, np_locations, side = \"left\")\n \n cm_ends = recombination_data.cm\n cm_distance = cm_ends[end_index]\n \n bp_difference = base_ends[end_index] - locations\n rates = recombination_data.rates\n cm_difference = bp_difference * rates[end_index - 1]\n cm_difference[end_index == 0] = 0\n\n adjusted_cm_distance = cm_distance - cm_difference\n assert np.all(0 <= adjusted_cm_distance) , \"Expected positive cM distances, got {}\".format(adjusted_cm_distance[adjusted_cm_distance < 0])\n assert np.all(adjusted_cm_distance <= cm_ends[-1])\n return adjusted_cm_distance", "def impute_cumulative_array(array):\n array = np.array(array).copy()\n array = convert_non_monotonic_to_nan(array)\n array = log_interpolate(array)\n return array", "def daily_rolling_drawdown(cumulative_returns, rolling_max):\n\n return (cumulative_returns / rolling_max) - 1", "def criticalvalue(tail, alpha):\n cumtail = np.cumsum(tail[::-1])\n # Valor crítico. Atenção! 
Não é a mesma convenção do aft-test...\n # este valor crítico ainda pertence à região crítica!\n critvalue = next(i for i, p in enumerate(cumtail[::-1]) if p < alpha)\n return critvalue", "def impute_cumulative_df(df, src_col, dst_col, groupby_col):\n if src_col not in df.columns:\n raise ValueError(f\"'{src_col}' not found\")\n\n if dst_col not in df.columns:\n df[dst_col] = -1\n\n for adm_name in df[groupby_col].unique():\n sub = df.loc[df[groupby_col] == adm_name].copy()\n sub[dst_col] = impute_cumulative_array(sub[src_col])\n\n # Replace non-monotonic values in original `cum_confirmed_cases` column with nulls\n raw_cum_col = \"cum_confirmed_cases\"\n sub.loc[sub[raw_cum_col].notnull(), raw_cum_col] = convert_non_monotonic_to_nan(\n np.array(sub.loc[sub[raw_cum_col].notnull(), raw_cum_col])\n )\n\n df.loc[df[groupby_col] == adm_name] = sub\n\n return df", "def value_to_cumulative_prob(value, hp):\n if isinstance(hp, Fixed):\n return 0.5\n if isinstance(hp, Boolean):\n # Center the value in its probability bucket.\n if value:\n return 0.75\n return 0.25\n elif isinstance(hp, Choice):\n ele_prob = 1 / len(hp.values)\n index = hp.values.index(value)\n # Center the value in its probability bucket.\n return (index + 0.5) * ele_prob\n elif isinstance(hp, (Int, Float)):\n sampling = hp.sampling or 'linear'\n if sampling == 'linear':\n return (value - hp.min_value) / (hp.max_value - hp.min_value)\n elif sampling == 'log':\n return (math.log(value / hp.min_value) /\n math.log(hp.max_value / hp.min_value))\n elif sampling == 'reverse_log':\n return (\n 1. - math.log((hp.max_value + hp.min_value - value) / hp.min_value) /\n math.log(hp.max_value / hp.min_value))\n else:\n raise ValueError('Unrecognized sampling value: {}'.format(sampling))\n else:\n raise ValueError('Unrecognized HyperParameter type: {}'.format(hp))", "def _get_cum_variance(self) -> np.ndarray:\n return np.cumsum(self.pca.explained_variance_ratio_)", "def cdf(self, value=None):\n if value is None:\n value = self.value\n return self.rv.cdf(\n value, *self._pymc_dists_to_value(self.args), **self.kwds\n )", "def get_price_0_cumulative_last(self, pair):\n pair_contract = self.conn.eth.contract(\n address=Web3.toChecksumAddress(pair), abi=SushiswapClient.PAIR_ABI)\n return pair_contract.functions.price0CumulativeLast().call()", "def cumsum(L):\n for i in range(1, len(L)):\n L[i] += L[i-1]\n return L", "def controlDelta(self, prev_delta, acc_rate):\n if (acc_rate > .6):\n return prev_delta * 1.1\n elif (acc_rate < .4):\n return prev_delta / 1.1\n else:\n return prev_delta", "def get_cumulative_value(self, header:TimeSeriesHeaderTypes, timespan:timedelta):\n\n if header not in self._series_data:\n return None\n\n data_series = self._series_data[header]\n\n #All times below are converted to seconds from unix epoch\n start_date = self._series_data[header][0, 0]\n\n start_index = np.argmax(data_series[:,0] > (start_date + timespan.total_seconds()))#Get index of first element after start_time\n stop_index = np.argmax(data_series[:,0] > start_date + timespan.total_seconds() + timespan.total_seconds())\n\n if stop_index == 0:#start_time + timespan > last data date\n stop_index = len(data_series[0])\n\n return np.sum(data_series[start_index:stop_index, 1])", "def get_percdone(self, current_acq=0):\n if current_acq == np.nan:\n # np.nan indicates that progress reporting is in principle\n # supported, but failed. 
This will be treated like no progress.\n percdone = np.nan\n else:\n percdone = (self.total_nr_acquired_values + current_acq) / (\n np.shape(self.get_sweep_points())[0] * self.soft_avg()) * 100\n try:\n now = time.time()\n if percdone != np.nan and percdone != self._last_percdone_value:\n # progress was made\n self._last_percdone_value = percdone\n self._last_percdone_change_time = now\n log.debug(f'MC: percdone = {self._last_percdone_value} at '\n f'{self._last_percdone_change_time}')\n elif self._last_percdone_change_time == 0:\n # first progress check: initialize _last_percdone_change_time\n self._last_percdone_change_time = now\n self._last_percdone_log_time = self._last_percdone_change_time\n else: # no progress was made\n no_prog_inter = self.no_progress_interval()\n no_prog_inter2 = self.no_progress_kill_interval()\n no_prog_min = (now - self._last_percdone_change_time) / 60\n log.debug(f'MC: no_prog_min = {no_prog_min}, '\n f'percdone = {percdone}')\n msg = f'The current measurement has not made any progress ' \\\n f'for {no_prog_min: .01f} minutes.'\n if now - self._last_percdone_change_time > no_prog_inter \\\n and now - self._last_percdone_log_time > no_prog_inter:\n log.warning(msg)\n self.log_to_slack(msg)\n self._last_percdone_log_time = now\n if now - self._last_percdone_change_time > no_prog_inter2:\n log.debug(f'MC: raising NoProgressError')\n raise NoProgressError(msg)\n except NoProgressError:\n raise\n except Exception as e:\n log.debug(f'MC: error while checking progress: {repr(e)}')\n return percdone" ]
[ "0.6869948", "0.67523324", "0.64255375", "0.6341759", "0.63019013", "0.6291231", "0.62111056", "0.6121284", "0.61202884", "0.60518414", "0.6049944", "0.60443413", "0.59521824", "0.5944532", "0.594417", "0.5942714", "0.5922843", "0.5915984", "0.58872694", "0.58869976", "0.5872482", "0.5866635", "0.58590853", "0.5854164", "0.58457065", "0.5811522", "0.58053577", "0.57482207", "0.5712693", "0.57055557", "0.5665141", "0.56650275", "0.5643591", "0.56377167", "0.56340325", "0.5595871", "0.5592033", "0.5549193", "0.55397725", "0.5529288", "0.5520458", "0.55155593", "0.55124503", "0.55000055", "0.5491555", "0.5469163", "0.54505116", "0.5446451", "0.5427348", "0.54132915", "0.54132915", "0.54132915", "0.5412161", "0.54065585", "0.5404756", "0.5393836", "0.5390323", "0.5390246", "0.53828627", "0.537371", "0.537371", "0.537371", "0.5350099", "0.5338303", "0.5337798", "0.5326048", "0.53101426", "0.53047144", "0.5289364", "0.52883685", "0.5286726", "0.5282776", "0.527209", "0.5271674", "0.52606064", "0.5250006", "0.52483493", "0.524777", "0.5246461", "0.52440727", "0.5195446", "0.5195251", "0.51918715", "0.5179096", "0.5177464", "0.5167736", "0.51659024", "0.5165122", "0.51642525", "0.5162558", "0.51587385", "0.5156577", "0.5147527", "0.51464087", "0.5144609", "0.5128608", "0.5119063", "0.5112591", "0.5108265", "0.5103322" ]
0.5677045
30
Initializes the base class.
def __init__(self, pid, monitor_id, logger, file_pattern): self._pid = pid self._id = monitor_id self._file_pattern = file_pattern # The file object to be read. We always keep this open and just seek to zero when we need to # re-read it. Some of the /proc files do better with this approach. self._file = None # The time we last collected the metrics. self._timestamp = None # True if the reader failed for some unrecoverable error. self._failed = False self._logger = logger self._metric_printer = MetricPrinter(logger, monitor_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(cls):", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def initialize(self):\n\t\tpass", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _init(self):\n pass", "def initialize(self) -> None:\n pass", "def init(self) -> None:\n ...", "def _init(self):\n raise NotImplementedError", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def initialize(self): \r\n pass", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def __init__(self):\n superClass.__init__(self)\n # TODO Your initialization stuff here", "def initialize(self):\n pass # pragma: no cover", "def __init__(self, base, **kwargs):\n self.base = base", "def init(self) -> None:", "def __init__ (self):\n pass", "def initialize(self, *args, **kwargs):\n pass", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self, **kwargs):", "def __init__(self):\n\t\tsuper().__init__()", "def __init__(self):\n\t\tsuper().__init__()", "def _init(self):", "def initialize(self):\n return", "def init(self):", "def init(self):", "def initialize(self, **kwargs):\n raise NotImplementedError()", "def initialize(self, **kwargs: Any) -> None:\n pass", "def __init__(self):\n super()", "def __init__(self) -> None:\n super().__init__()", "def __init__(self) -> None:\n super().__init__()", "def __init__():", "def initialize(self):\n pass", "def initialise(self):", "def initialize(self, *args, **kwargs):", "def _real_initialize(self):\n pass", "def do_init(self):\n\n pass", "def __init__(self, *, base=None):\n self._base = base\n\n self._map = {}", "def init(self, *args, **kwds):\n pass", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n print(\"Initializing\")", "def __init__(self):\n print(\"Initializing\")", "def __init__(self):\n super().__init__()\n pass", "def __init__(self):\n raise NoInitiation", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def initialize(self):\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.initialize, just copy the code over\n\n # --------- END YOUR CODE ----------\n pass", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(__self__):\n pass", "def __init__(__self__):\n 
pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(self):\r\n\t\tpass", "def __init__(self, base):\r\n\r\n self.base = base", "def _initialize(self, **kwargs):\n return None", "def __init__(self):\r\n super().__init__()", "def __init__(self, **kwds):\n raise NotImplementedError" ]
[ "0.8141555", "0.78926814", "0.78926814", "0.7889723", "0.78779274", "0.78779274", "0.78618664", "0.78618664", "0.78618664", "0.78496444", "0.7829186", "0.78273886", "0.78159505", "0.7813773", "0.7813773", "0.7813773", "0.7813773", "0.7813773", "0.7808769", "0.7808769", "0.7808769", "0.7808769", "0.7808769", "0.7808769", "0.7808769", "0.7808769", "0.77624005", "0.775657", "0.775657", "0.775657", "0.7689748", "0.7686944", "0.7686428", "0.7676449", "0.7632513", "0.75291556", "0.74872667", "0.7477308", "0.7477308", "0.7477308", "0.7477308", "0.74570495", "0.7413796", "0.7413796", "0.74035144", "0.73987603", "0.73931956", "0.73931956", "0.7378744", "0.735866", "0.73370427", "0.73124045", "0.73124045", "0.728544", "0.72721875", "0.72709405", "0.7269531", "0.72679335", "0.7242781", "0.72341484", "0.72292006", "0.7228974", "0.7228974", "0.7228974", "0.7228974", "0.71966517", "0.71966517", "0.7195791", "0.7179966", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71784896", "0.71773344", "0.71631265", "0.71631265", "0.7148675", "0.7148675", "0.7148675", "0.7148675", "0.7148675", "0.7148675", "0.7148675", "0.7148675", "0.7148675", "0.7148675", "0.7141238", "0.71382123", "0.71337515", "0.7130035", "0.7129101" ]
0.0
-1
Runs a single cycle of the sample collection. It should read the monitored file and extract all metrics.
def run_single_cycle(self, collector=None): self._timestamp = int(time.time()) # There are certain error conditions, such as the system not supporting # a particular proc file type, that we will never recover from. So, # just always early exit. if self._failed: return {} filename = self._file_pattern % self._pid if not collector: collector = {} if self._file is None: try: self._file = open(filename, "r") except IOError as e: # We take a simple approach. If we don't find the file or # don't have permissions for it, then just don't collect this # stat from now on. If the user changes the configuration file # we will try again to read the file then. self._failed = True if e.errno == errno.EACCES: self._logger.error( "The agent does not have permission to read %s. " "Maybe you should run it as root.", filename, ) elif e.errno == errno.ENOENT: self._logger.error( ( "The agent cannot read %s. Your system may not support that proc file " 'type or the process with pid "%s" doesn\'t exist' ), filename, self._pid, ) # Ignore 'process not found' errors (likely caused because the process exited # but re-raise the exception for all other errors elif e.errno != errno.ESRCH: raise e if self._file is not None: try: self._file.seek(0) return self.gather_sample(self._file, collector=collector) except IOError as e: # log the error if the errno isn't 'process not found'. Process not found likely means the # process exited, so we ignore that because it's within the realm of expected behaviour if e.errno != errno.ESRCH: self._logger.error( "Error gathering sample for file: '%s'\n\t%s" % (filename, six.text_type(e)) ) # close the file. This will cause the file to be reopened next call to run_single_cycle self.close() return collector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\r\n self.collect_data()", "def gather_sample(self, my_file, collector=None):\n\n pass", "def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n sensor_id = self.__sensor.get_sensor_id()\n data = self.__sensor.retrieve_data_string() \n if DEBUG:\n print('data: \"{}\"'.format(data),\n file = sys.stderr, flush=True)\n when = datetime.datetime.now(datetime.timezone.utc).isoformat()\n result = OUTPUT_FORMAT.format(when,\n sensor_name, \n sensor_id, \n data)\n output.write(result)\n output.flush()\n \n next_sample_time = next_sample_time + self.__interval\n delay_time = next_sample_time - time.time()\n if DEBUG:\n print('delay_time = {}'.format(delay_time),\n file=sys.stderr, flush=True)\n \n if 0 < delay_time: # don't sleep if already next sample time\n time.sleep(delay_time)", "def run(self):\n self.logger.info(f'Running {self.__class__.__name__}')\n while True:\n last_check = time.time()\n self.collect_new_events()\n while time.time() - last_check < self._check_for_new_events_interval:\n self.logger.debug('Waiting for new events collection: new collection in {}s'.format(\n self._check_for_new_events_interval - (time.time() - last_check)))\n time.sleep(1)", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def run_resample(self):\n\n self.in_data.open()\n self.out_data.open()\n\n try:\n # Get the fields from the input file and set them/write headers in output:\n self.all_fields = self.in_data.fields\n\n self.out_data.set_fields(self.all_fields)\n self.out_data.write_headers()\n\n # Set up the sensor fields by removing non-sensor fields:\n self.set_sensor_only_fields()\n\n # Read the first event from the input file:\n self.get_next_input_event()\n\n # Warn and exit if we have no input data to read:\n if self.next_input_event is None:\n msg = f\"The input file {self.in_file} did not have any data rows\"\n warn(msg)\n\n return\n\n self.first_event_stamp = self.next_input_event[self.stamp_field]\n\n # Set up the sample tracking (here mostly to set the start of the first interval):\n self.reset_sample_tracking()\n\n # Now iterate through the output intervals:\n while True:\n self.process_next_interval()\n except EOFError: # catch when we are at the end of the file\n pass\n finally:\n self.in_data.close()\n self.out_data.close()\n\n print() # make sure we go to a new output line", "def run(self, repetitions, **kwargs):\n\t\tself.sampler.sample(repetitions, **kwargs)", "def start(self):\n while True:\n LogService.log_info(\"aggregator\", \"Creating statistics\")\n self.create_statistics()\n LogService.log_info(\"aggregator\", \"Cleaning up\")\n self.cleanup_measurements()\n LogService.log_info(\"aggregator\", \"Sleeping for 60 minutes\")\n time.sleep(60*60)", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def run(self):\n operation_manager 
= self._core.get_operation_manager()\n while True:\n while operation_manager.process_next():\n pass\n sleep(2)", "def run(self):\n while True:\n try:\n sleep(influx_settings.write_freq)\n self._redis2influx()\n except InterruptExceptions as ex:\n raise ex", "def run_sample(smp: sample.Sample,\n run_dir: Text,\n summary_file: Optional[Text] = None,\n generate_sample_ns: Optional[int] = None):\n start = time.time()\n # Create a script named 'run.sh' for rerunning the sample.\n args = [\n SAMPLE_RUNNER_MAIN_PATH,\n '--logtostderr',\n '--input_file=sample.x',\n '--options_file=options.pbtxt',\n ]\n\n _write_to_file(run_dir, 'sample.x', smp.input_text)\n _write_to_file(run_dir, 'options.pbtxt', smp.options.to_pbtxt())\n args_filename = 'args.txt'\n _write_to_file(\n run_dir, args_filename, sample.args_batch_to_text(smp.args_batch)\n )\n args.append('--args_file=args.txt')\n ir_channel_names_filename = None\n if smp.ir_channel_names is not None:\n ir_channel_names_filename = 'ir_channel_names.txt'\n _write_to_file(run_dir, ir_channel_names_filename,\n sample.ir_channel_names_to_text(smp.ir_channel_names))\n args.append('--ir_channel_names_file=ir_channel_names.txt')\n args.append(run_dir)\n _write_to_file(\n run_dir,\n 'run.sh',\n f'#!/bin/sh\\n\\n{subprocess.list2cmdline(args)}\\n',\n executable=True)\n logging.vlog(1, 'Starting to run sample')\n logging.vlog(2, smp.input_text)\n runner = sample_runner.SampleRunner(run_dir)\n runner.run_from_files(\n 'sample.x', 'options.pbtxt', args_filename, ir_channel_names_filename\n )\n timing = runner.timing\n\n timing.total_ns = int((time.time() - start) * 1e9)\n if generate_sample_ns:\n # The sample generation time, if given, is not part of the measured total\n # time, so add it in.\n timing.total_ns += generate_sample_ns\n timing.generate_sample_ns = generate_sample_ns\n\n logging.vlog(1, 'Completed running sample, elapsed: %0.2fs',\n time.time() - start)\n\n if summary_file:\n _write_ir_summaries(run_dir, timing, summary_file)", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n for key in value_dict.keys():\n value_dict[key] *= self.coverage\n value_dict['coverage'] = self.coverage\n logging.info(\"coverage:{0}\".format(self.coverage))\n for key, value in value_dict.items():\n logging.info(\"{0}:{1}\".format(key,value))\n self.add_records(self.main_loop.log, value_dict.items())\n self.check_stop(value_dict)\n logger.info(\"Monitoring on auxiliary data finished\")", "def run_sample(smp: sample.Sample,\n run_dir: Text,\n summary_file: Optional[Text] = None,\n generate_sample_ns: Optional[int] = None):\n start = time.time()\n\n _write_to_file(run_dir, 'sample.x', smp.input_text)\n _write_to_file(run_dir, 'options.json', smp.options.to_json())\n if smp.args_batch:\n _write_to_file(run_dir, 'args.txt',\n sample.args_batch_to_text(smp.args_batch))\n\n # Create a script named 'run.sh' for rerunning the sample.\n args = [\n SAMPLE_RUNNER_MAIN_PATH, '--logtostderr', '--input_file=sample.x',\n '--options_file=options.json'\n ]\n if smp.args_batch:\n args.append('--args_file=args.txt')\n args.append(run_dir)\n _write_to_file(\n run_dir,\n 'run.sh',\n f'#!/bin/sh\\n\\n{subprocess.list2cmdline(args)}\\n',\n executable=True)\n logging.vlog(1, 'Starting to run sample')\n logging.vlog(2, smp.input_text)\n runner = sample_runner.SampleRunner(run_dir)\n runner.run_from_files('sample.x', 'options.json', 'args.txt')\n timing = runner.timing\n\n timing.total_ns = 
int((time.time() - start) * 1e9)\n if generate_sample_ns:\n # The sample generation time, if given, is not part of the measured total\n # time, so add it in.\n timing.total_ns += generate_sample_ns\n timing.generate_sample_ns = generate_sample_ns\n\n logging.vlog(1, 'Completed running sample, elapsed: %0.2fs',\n time.time() - start)\n\n if summary_file:\n _write_ir_summaries(run_dir, timing, summary_file)", "def run(self):\n\n # How to retrieve your input data.\n input_1_data = self.in_data['input_1']\n\n # How to retrieve your params value.\n param_1 = self.param['param_1']\n\n # How to process data.\n # Just write any number of methods you want and use them here.\n sample_out_data = self.sample_method(input_1_data, param_1)\n\n # Go to the definition of this method to see how to log.\n self.demo_log()\n\n # This is how to set output data.\n self.out_data['output_1'] = sample_out_data", "def run(self):\n\n step = self.steps['diagnostics_files']\n step.cores = self.config.getint('make_diagnostics_files', 'cores')\n\n # run the step\n super().run()", "def run():\n logger.info(f\"Process started:\")\n logger.info(f\"Converting Glove file to Word2Vec format\")\n convert_to_word2vec.convert(\n \"./data/source/glove.6B.50d.txt\", \"./data/source/glove.6B.50d.w2vformat.txt\"\n )\n\n logger.info(f\"Extracting Click Stream data\")\n extract_click_stream_data()\n\n logger.info(\"Extracting Wiki articles\")\n extract_wiki_articles()\n\n logger.info(f\"Generating Clickstream dataset\")\n generate_datasets()\n\n logger.info(\"Tokenizing articles\")\n WikiArticlesTokenizer().process()\n\n logger.info(\"Creating dataset with Wiki Articles\")\n create_wiki_articles_dataset()", "def test_continuous_bulk_parsing(self, aggregator):\n test_data = ensure_bytes(open(NAGIOS_TEST_LOG).read())\n ITERATIONS = 10\n log_file = tempfile.NamedTemporaryFile(mode=\"a+b\")\n\n # Get the config\n config, nagios_cfg = get_config(\"log_file={}\\n\".format(log_file.name), events=True)\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n for _ in range(ITERATIONS):\n log_file.write(test_data)\n log_file.flush()\n nagios.check(config['instances'][0])\n\n log_file.close()\n assert len(aggregator.events) == ITERATIONS * 503", "def run(self):\n print(\"%s starting up\" % self.getName())\n for count in range(self.accessCount):\n time.sleep(random.randint(1, self.sleepMax))\n value = self.cell.read(lambda counter: self.cell.data.count)\n print(\"%s is done getting %s\" % (self.getName(), str(value)))", "def run(self):\n\n while self.source.SAMPLING:\n wx.CallAfter(self.target, self)\n sleep(0.75)\n\n self.Terminate()", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. 
on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector", "def run(self):\n\t\tself.data_source.connect()\n\t\twhile self.running:\n\t\t\tself.data_source.read()", "def start_collector(self, callback=None):\n self.log.info(\"starting-pm-collection\", device_name=self.name)\n if callback is None:\n callback = self.perform_test_omci\n\n if self.lc is None:\n self.lc = LoopingCall(callback)\n\n if self.default_freq > 0:\n self.lc.start(interval=self.default_freq / 10)", "def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)", "def run(self):\n self.speed_test.start()", "def loop(self):\n pass", "def run(self):\n print(\"%s starting up\" % self.getName())\n for count in range(self.accessCount):\n time.sleep(random.randint(1, self.sleepMax))\n value = self.cell.write(lambda counter: counter.increment())\n print(\"%s is done incrementing to %s\" % (self.getName(), str(value)))", "def _run(self):\n while(self._loop):\n pass", "def run(self):\n self._setupLogger()\n self.setup()\n\n self.logger.info(self.moduleName + \" starting run loop.\")\n\n while True:\n self.loop()", "def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")", "def run(self):\n while True:\n try:\n self.check_value()\n except Exception as e:\n print(f\"[ERROR]: Error running the continuous run loop on sample_module: {e}\")\n continue", "def _run_cycle(self):\n pass", "def run(self):\n logger.info(\"Running...\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def test_continuous_bulk_parsing(self, aggregator):\n test_data = open(NAGIOS_TEST_LOG).read()\n events = []\n ITERATIONS = 10\n log_file = tempfile.NamedTemporaryFile(mode=\"a+b\")\n log_file.write(test_data.encode('utf-8'))\n log_file.flush()\n\n # Get the config\n config, nagios_cfg = get_config('\\n'.join([\"log_file={0}\".format(log_file.name)]), events=True)\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, {}, instances=config['instances'])\n nagios.get_topology = mocked_topology\n\n for i in range(ITERATIONS):\n log_file.write(test_data.encode('utf-8'))\n log_file.flush()\n nagios.check(config['instances'][0])\n events.extend(events)\n\n log_file.close()\n assert len(aggregator.events) == ITERATIONS * 505", "def test_runner_full_loop(caplog, dataset):\n caplog.set_level(logging.DEBUG)\n\n session = dataset\n\n start_date = datetime.datetime(2020, 5, 17, 13, 0, 0)\n end_date = datetime.datetime(2020, 5, 17, 13, 0, 5)\n replay_rate = 1 \n \n db_connector_test = DataBaseConnector(session=session, \n table_name='timeseries_dataset', \n time_column='timestamp', \n start_date=start_date,\n end_date=end_date)\n\n test_publisher = ConsolePublisher()\n\n 
runner = CentralRunner(db_connection=db_connector_test, \n output_system=test_publisher, \n start_time=start_date, \n end_time=end_date,\n replay_rate=replay_rate )\n\n start = time.perf_counter()\n \n runner.run()\n\n end = time.perf_counter()\n\n code_time = end - start\n \n print(code_time)\n \n assert int(code_time) == 4", "async def test_all_samples(self):\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"248\", entities=[])", "def run_inner(self):\n for event in self.inotify.event_gen():\n self.process_inotify_event(event)", "def run(self,mc_sample=None):\n if mc_sample:\n self.mc_sample = mc_sample\n\n total_scores = 0.0\n total_scores_square = 0.0\n self.scores_list =[]\n \n for i in range(self.num_runs): #runs the specified number of Monte Carlo samples\n score = next(self.mc_sample) #next score\n self.scores_list.append(score) \n total_scores += score\n total_scores_square += score**2\n\n self.xhat = total_scores / self.num_runs #mean of score\n self.x2hat = total_scores_square / self.num_runs #mean of score^2\n\n self.sample_variance = (self.num_runs / (self.num_runs - 1.0)) * (self.x2hat - (self.xhat**2))\n self.sample_stddev = np.sqrt(self.sample_variance)\n self.mean_variance = self.sample_variance / (self.num_runs - 1.0)\n self.mean_stddev = np.sqrt(self.mean_variance)", "def test_runner_full_loop_big(caplog, big_dataset):\n caplog.set_level(logging.INFO)\n\n session = big_dataset\n\n start_date = datetime.datetime(2020, 5, 17, 13, 0, 3)\n end_date = datetime.datetime(2020, 5, 17, 13, 0, 6)\n replay_rate = 1 \n\n db_connector_test = DataBaseConnector(session=session, \n table_name='timeseries_dataset', \n time_column='timestamp', \n start_date=start_date,\n end_date=end_date)\n\n debug_publisher = FilePublisher()\n\n runner = CentralRunner(db_connection=db_connector_test, \n output_system=debug_publisher, \n start_time=start_date, \n end_time=end_date,\n replay_rate=replay_rate )\n\n start = time.perf_counter()\n \n runner.run()\n\n end = time.perf_counter()\n\n code_time = end - start\n assert int(code_time) == 2\n\n ## Test results\n assert os.path.exists('test_tmp/17-05-2020_13-00-03')\n assert os.path.exists('test_tmp/17-05-2020_13-00-05')\n\n assert len(os.listdir('test_tmp/17-05-2020_13-00-03')) == 9\n assert len(os.listdir('test_tmp/17-05-2020_13-00-05')) == 2", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0", "def run_sample(self):\n # there will be validation failures for sample data\n self.validate_req(ignore_failure=True)\n runner_fn = self.model_runner.execute_model_for_sample_data\n return self.do_handle_request(runner_fn)", "def run(self):\n while True:\n # Do something\n print('Doing something imporant in the background')\n\n self.loadData()\n time.sleep(self.interval)", "def run(self):\n\n self.create_trials() 
# create them *before* running!\n self.start_experiment()\n\n for trail in self.trials:\n trial.run()\n\n self.close()", "def run(self):\n t = Ticker(self.freq)\n while True: \n t.tick()\n #Name, used to identify the observation in memory:\n Operators = self.Operators\n if Operators:\n self.add_property('name', 'HumanDetector')\n self.add_property('Operators', Operators)\n self.store_observation()\n\n self.update()", "def sample(self):\n logger.info(\"%s: collect sensor data\", self.__class__.__name__)\n samples = []\n self._fetch_data(samples)\n return samples", "def test_scan_file(self):\n self.run_scan(self.filename, 1)", "def run(self):\n\n self.load_file()\n self.cat_to_num()\n self.split()", "def run(self):\n self.monitor.start()", "def run(self):\n logging.info(\"Running...\\n\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def run(self):\n logging.info(\"Running...\\n\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def run_one_test_cycle(self):\n logging.info(\n \"{0} operations remaining: {1}\".format(\n self.args.pm_operation, self.args.repetitions\n )\n )\n\n self.check_last_cycle_duration()\n if self.args.repetitions > 0:\n self.run_pm_command()\n else:\n self.summary()", "def run(self):\n LOG.debug(\"ReaderThread up and running\")\n\n lastevictTime = 0\n while ALIVE:\n for col in allLivingCollectors():\n for line in col.collect():\n self.processLine(col, line)\n now = int(time.time())\n if now - lastevictTime > self.evictinterval:\n lastevictTime = now\n now -= self.evictinterval\n for col in allCollectors():\n col.evictOldkeys(now)\n # BUGBUG : not good\n time.sleep(1)", "def run(self):\n\n self.preprocess()\n self.restore_ratings()\n self.prepare_UI()\n self.loop_through_units()\n self.cleanup()\n\n print('\\nAll Done - results are available in:\\n\\t{}'.format(self.out_dir))", "def do_cycle(self):\n c.check_running()\n online_models = self.get_online_models()\n if len(online_models) > 0:\n self.process_models(online_models)\n self.print_recording()", "def step(self):\r\n self.datacollector.collect(self)\r\n self.datacollector2.collect(self)\r\n self.datacollector3.collect(self)\r\n self.datacollector4.collect(self)\r\n self.datacollector5.collect(self)\r\n self.datacollector6.collect(self)\r\n self.datacollector7.collect(self)\r\n self.datacollector8.collect(self)\r\n self.datacollector9.collect(self)\r\n self.datacollector10.collect(self)\r\n self.datacollector11.collect(self)\r\n self.datacollector12.collect(self)\r\n self.datacollector13.collect(self)\r\n\r\n self.datacollector14.collect(self)\r\n self.datacollector15.collect(self)\r\n self.datacollector16.collect(self)\r\n self.datacollector17.collect(self)\r\n self.datacollector18.collect(self)\r\n self.datacollector19.collect(self)\r\n self.datacollector20.collect(self)\r\n self.datacollector21.collect(self)\r\n self.datacollector22.collect(self)\r\n self.datacollector23.collect(self)\r\n self.datacollector24.collect(self)\r\n self.datacollector25.collect(self)\r\n self.datacollector26.collect(self)\r\n self.schedule.step()", "def _counter(self):\n while True:\n # ensure counter interval is up to date\n self._read_interval_time()\n\n log.debug(\"SFlowManager._counter: sleeping for %s\", self._counter_interval)\n\n time.sleep(self._counter_interval)\n\n # get a cpu times sample\n res = resource.getrusage(resource.RUSAGE_SELF)\n\n # build and send counter structure\n csample = { 
'counter_sample': {\n 'app_name': get_sys_name(),\n 'app_resources': {\n 'user_time': int(res.ru_utime * 1000),\n 'system_time': int(res.ru_stime * 1000),\n 'mem_used': 0, # @TODO\n 'mem_max': res.ru_maxrss * 1024,\n 'fd_open': 0, # @TODO do we care?\n 'fd_max': 0, # @TODO \"\"\n 'conn_open': 0, # @TODO couch/rabbit connection summary somehow\n 'conn_max': 0\n },\n 'app_workers':{\n 'workers_active': len(self._container.proc_manager.proc_sup.children),\n 'workers_idle': 0,\n 'workers_max': 1024,\n 'req_delayed': 0,\n 'req_dropped': 0\n }\n }\n }\n\n log.debug(\"Publishing counter stats: %s\" % csample)\n\n self._publish(csample)", "def test_run(self):\n engine = Engine(self.config_file, self.api_token, 23)\n engine.msg_wait_iterations = 0\n\n # Put some stuff on the task queue\n self.setup_helper.add_volumetric_tasks(self.aws_creds[\"access_key\"],\n self.aws_creds['secret_key'],\n self.upload_queue_url, engine.backend)\n\n engine.join()\n engine.run()\n\n # Check for tile to exist\n s3 = boto3.resource('s3')\n ingest_bucket = s3.Bucket(self.ingest_bucket_name)\n\n with tempfile.NamedTemporaryFile() as test_file:\n with open(test_file.name, 'wb') as raw_data:\n ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)\n with open(test_file.name, 'rb') as raw_data:\n # Using an empty CloudVolume dataset so all values should be 0.\n # dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type\n cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')\n unique_vals = np.unique(cuboid)\n assert 1 == len(unique_vals)\n assert 0 == unique_vals[0]", "def run(self):\n\n while not self.__done:\n self.single_cycle()\n\n \"\"\"\n while not self.__done:\n self.step()\n self.debug()\n \"\"\"", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about.\n m = re.search(r\"^(\\w+):\\s*(\\d+)\", line)\n if m is None:\n continue\n\n field_name = m.group(1)\n int_value = int(m.group(2))\n # FDSize is not the same as the number of open file descriptors. 
Disable\n # for now.\n # if field_name == \"FDSize\":\n # self.print_sample(\"app.fd\", int_value)\n if field_name == \"VmSize\":\n collector.update({Metric(\"app.mem.bytes\", \"vmsize\"): int_value * 1024})\n elif field_name == \"VmPeak\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_vmsize\"): int_value * 1024}\n )\n elif field_name == \"VmRSS\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"resident\"): int_value * 1024}\n )\n elif field_name == \"VmHWM\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_resident\"): int_value * 1024}\n )\n return collector", "def run(self, stream: np.ndarray) -> None:\n for index, event in enumerate(stream):\n self.event_index = index\n case_id, activity_name, activity_timestamp = (event[0], event[1], event[2])\n if index == 0:\n self.check_point = activity_timestamp\n self.process_event(case_id, activity_name, activity_timestamp)\n\n self.drift_indexes = list(np.unique(self.drift_indexes))\n print(\"Total number of drifts:\", len(self.drift_indexes))\n print(\"Drift points:\", self.drift_indexes)\n cumulative_stream_drifts(len(stream), self.drift_indexes, f'visualization/drifts/{self.name}.pdf')", "def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1", "def test_1000_populations_with_activity_one_per_cycle():\n\n with path.tempdir() as log_parent_folder:\n log_folder = os.path.join(log_parent_folder, \"logs\")\n\n run_test_scenario_1(clock_step=\"15 min\",\n simulation_duration=\"10days\",\n n_stories=1,\n per=pd.Timedelta(\"1 day\"),\n log_folder=log_folder)\n\n logging.info(\"loading produced logs\")\n logs = load_all_logs(log_folder)[\"the_logs\"]\n\n logging.info(\"number of produced logs: {} logs\".format(logs.shape[0]))\n\n # 10 days of simulation should produce 1000 * 1 * 10 == 10000 logs\n assert 9500 <= logs.shape[0] <= 10500", "def run( self, cycles=-1 ):", "def run_single(self):\n self.run_sim_time(1)", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n logger.info(\"Monitoring on auxiliary data finished\")", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n logger.info(\"Monitoring on auxiliary data finished\")", "def main():\n stats.set_time_start()\n\n if config.options.show_progress:\n stats.start_monitor()\n\n recorders = Recorder.launch(config.options.recorders)\n\n try:\n for filename in config.filenames:\n parser.parse(filename)\n\n Recorder.wait_empty()\n except KeyboardInterrupt:\n pass\n\n stats.set_time_stop()\n\n if config.options.show_progress:\n stats.stop_monitor()\n\n stats.print_summary()", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n self.check_stop(value_dict)\n logger.info(\"Monitoring on auxiliary data finished\")", "def main():\n logging.basicConfig() # create a baseconfiguration s.t. 
we cann now log \n cycle = 0\n while True:\n\n logging.info(f\"{time.now()} - Start cycle {cycle}\") # changed from print to info \n do_unstable_magick(cycle)\n logging.info(f\"{time.nos()} - Finished cycle {cycle}\")", "def _collect_data(endless=True):\r\n conf = Config.Config().get_config()\r\n\r\n Collector.DataCollector(conf.get('db', 'path'), 'config')\r\n\r\n if endless:\r\n try:\r\n while True:\r\n time.sleep(0.5)\r\n except KeyboardInterrupt:\r\n click.echo(\"Process terminated\")\r\n exit(0)", "def run(self):\n while True:\n self.logger.info(\"Scraping...\")\n print('scraping...')\n decks = []\n try:\n decks.extend(scrape_decks())\n except Exception as e:\n print('scraping exception' + str(e))\n self.logger.exception(\n 'Scraper for TappedOut raised an exception'\n )\n\n self.insert_decks(decks)\n\n self.logger.info(\n \"Done scraping, sleeping for {} days\".format(self.interval)\n )\n time.sleep(self.interval * (60 * 60 * 24))", "def run(self):\n\n sample_counter = 0\n self.started = True\n\n data_queue = Queue()\n with Producer(data_queue, generator=self.generator,\n freq=1 / self.sample_hz):\n while self.running():\n sample_counter += 1\n try:\n sample = data_queue.get(True, 2)\n self.outlet.push_sample(sample)\n if self.add_markers and sample_counter % 1000 == 0:\n self.markers_outlet.push_sample([\"1\"])\n except (Empty, AttributeError):\n # outlet.push_sample(sample) may cause an error after\n # the server has been stopped since the attribute is\n # deleted in another thread.\n break\n\n log.debug(\"[*] No longer pushing data\")", "def main(sc):\n\n # Load data set and parse out statistical counters\n delays = sc.textFile(DATASET).map(counters)\n\n # Perform summary aggregation by key\n delays = delays.reduceByKey(aggregation)\n delays = delays.map(summary)\n\n # Write the results out to disk\n delays.saveAsTextFile(\"delays-summary\")", "def run_once(self):\n # Track some statistics about artifacts in a summary object.\n summary = collections.Counter()\n\n for source in self.sources:\n # Run the source to collect artifacts.\n self.logger.info(f\"Running source '{source}'\")\n try:\n # get the generator of onions\n onions = self.sources[source].run()\n except Exception as e:\n self.logger.error(e)\n self.logger.error(traceback.print_exc())\n continue\n\n # Process onions with each operator.\n for operator in self.operators:\n self.logger.info(f\"Processing found onions with operator '{operator}'\")\n try:\n self.operators[operator].process(onions)\n # Save the source onion with collected data\n except Exception as e:\n self.logger.error(e)\n self.logger.error(traceback.print_exc())\n continue\n\n\n\n# # Record stats and update the summary.\n# types = artifact_types(doc.get('interestingKeywords'))\n# summary.update(types)\n# for artifact_type in types:\n# self.logger.info(f'types[artifact_type]')\n\n # Log the summary.\n self.logger.info(f\"New artifacts: {dict(summary)}\")", "def run(self, input_time_series=None, num_iter=None, record=False,\n output=False):\n pass", "def loop_forever(self):\n while True:\n if self.get_parameter_value(\"publishing_mode\") == \"continuous\":\n self.publishMeasure()", "def run(self):\n self.log.overall('Starting run')\n run_start = time()\n for epoch in xrange(self.n_epochs):\n self.agent.reset()\n self.n_epoch = epoch\n self._run_epoch()\n self.log.overall('End of run ({:.2f} s)'.format(time() - run_start))", "def _start_proc_collector(self) -> None:\n thread = threading.Thread(target=self._proc_collect, name=\"ProcessMetricsCollector\", 
daemon=True)\n thread.start()", "def run(self):\n self._logger.info(\"Locator started main loop\")\n self._running = True\n while self._running:\n self._update_locations()\n time.sleep(self.interval)", "def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:\n discovery_duration = benchmark_spec.data_discovery_service.DiscoverData()\n return [\n sample.Sample('data_discovery_duration', discovery_duration, 'seconds',\n benchmark_spec.data_discovery_service.GetMetadata())]", "def run(self):\n while True:\n time.sleep(RTM_READ_DELAY)\n for event in self._slack_client.rtm_read():\n self.handle_event(event)", "def run_measurement(self, probe_name: str) -> None:\n logging.info(f\"Running run {self._measurement_iteration_number}\")\n probe = self._config.probes[probe_name]\n if isinstance(probe, RunnableProbe) and self._ivis_server_available:\n state = self._send_request(Request.STATE, probe=probe)\n else:\n state = None\n self.run_probe(probe_name, f\"run{self._measurement_iteration_number}\", json.dumps(state))\n while self._current_process is not None:\n time.sleep(.01)\n self._measurement_iteration_number += 1", "def run(self):\n print('Starting CloudWatchLogsMonitor.')\n\n # Initialize pool for multithreading.\n # Use ThreadPool for shared memory (used for keeping track of last polled timestamp)\n pool = ThreadPool()\n\n while True:\n\n # Check for new LogGroups and LogStreams.\n self.update()\n\n for log_group in self.log_groups:\n # For every log group get and append log events to log file.\n # This is run in parallel and is non-blocking.\n pool.map_async(LogStream.get_and_append_log_events, log_group.log_streams)\n\n # These lines run the agent synchronously.\n # You need to comment out the pool.map_async line above if using synchronous loop.\n # for log_stream in log_group.log_streams:\n # LogStream.get_and_append_log_events(log_stream)\n\n # Sleep for the polling interval.\n time.sleep(self.default_polling_interval)", "def run(self):\n while True:\n try:\n if not self._read_new_entries(False):\n time.sleep(0.1)\n self._update_all_tasks()\n except KeyboardInterrupt:\n break", "def run(self):\n\t\tfor source in self.sources:\n\t\t\tstringutil.print_color(Fore.GREEN, 'Downloading from Source: %s' % source.get_alias())\n\t\t\tfor r in source.get_elements():\n\t\t\t\tr.set_source(source)\n\t\t\t\tself._queue.put(r)\n\n\t\t\t\t# Extra tracking stuff below:\n\t\t\t\twith self._c_lock:\n\t\t\t\t\tself._total_count+= 1\n\t\t\t\tif self._testing_cache is not None:\n\t\t\t\t\tself._testing_cache.append(r)\n\t\t#print(\"Element loading complete.\\n\")\n\t\tself._running = False", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def _inst_run(self):\r\n self._inst_get_img_info_from_db()\r\n print(\"run method: \", time.ctime())\r\n th = threading.Timer(\r\n 10,\r\n self._inst_run\r\n )\r\n th.start()", "def run(self):\n logging.info('running experiment...')\n self._prepare()\n self._load_data()\n self._run()\n self._evaluate()\n self._summarise()\n return True", "def run(self):\n rate = rospy.Rate(self._run_rate)\n while not rospy.is_shutdown():\n try:\n rate.sleep()\n except:\n break", "def run_extraction(self):\n self.background_estimator = ReflectedRegionsBackgroundEstimator(\n observations=self.observations, **self.config[\"background\"]\n )\n self.background_estimator.run()\n\n self.extraction = SpectrumExtraction(\n observations=self.observations,\n bkg_estimate=self.background_estimator.result,\n **self.config[\"extraction\"]\n )\n\n 
self.extraction.run()", "def run(self, item_callback=None):\n self.item_callback = item_callback\n logger.info('Starting consumer. Use CTRL+C to stop.')\n while self.shards:\n # time.sleep(0.5)\n for shard in self.shards:\n shard_id = shard['ShardId']\n shard_iterator = self.get_iterator(shard)\n self.process_records(shard_iterator, shard_id)", "def get_next_sample(self):", "def run_test_suite(self, test_config):\n # Folder to store suite results\n test_config['test_suite_start_time'] = datetime.datetime.now().strftime(\n '%Y%m%dT%H%M%S')\n\n instance = cluster_local.UseLocalInstances()\n for i in range(test_config['repeat']):\n self.run_benchmark(test_config, instance, copy=i)\n\n suite_dir_name = '{}_{}'.format(test_config['test_suite_start_time'],\n test_config['test_id'])\n reporting.process_folder(\n os.path.join(self.workspace, 'results', suite_dir_name),\n report_config=self.auto_test_config)", "def start(self):\n for trial in self._trials:\n self._run(trial)", "def sample(self):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def run(self) -> None:\n\n # Track the last timestamp we see. When we fetch_stream() again on the\n # next iteration, we'll start from that timestamp onwards to avoid\n # fetching every single page again. The last event or two will be\n # still be in the response, but our de-duping will ignore those.\n last_timestamp = None\n\n # Keep track of what log entries we've consumed so that we suppress\n # duplicates. Duplicates will arise in our stream due to the way we\n # watch for new entries.\n consumed = set() # type: MutableSet\n\n # How many successful vs failed fetch_stream calls. If we consistently see\n # failures but we never see a successful attempt, we should raise an exception\n # and stop.\n success_count = 0\n failure_count = 0\n\n while not self.stopped.wait(0.2):\n try:\n for entry in fetch_stream(self.stream, start_time = last_timestamp):\n if entry[\"eventId\"] not in consumed:\n consumed.add(entry[\"eventId\"])\n\n last_timestamp = entry[\"timestamp\"]\n\n self.consumer(entry)\n except (ClientError, BotocoreConnectionError):\n failure_count += 1\n if failure_count > MAX_FAILURES and not success_count:\n raise\n else:\n success_count += 1", "def loop(self):\n raise NotImplementedError()", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n # File format is single value per line with \"fieldname:\" prefix.\n for x in stat_file:\n fields = x.split()\n if len(fields) == 0:\n continue\n if not collector:\n collector = {}\n if fields[0] == \"rchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"read\"): int(fields[1])})\n elif fields[0] == \"syscr:\":\n collector.update({Metric(\"app.disk.requests\", \"read\"): int(fields[1])})\n elif fields[0] == \"wchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"write\"): int(fields[1])})\n elif fields[0] == \"syscw:\":\n collector.update({Metric(\"app.disk.requests\", \"write\"): int(fields[1])})\n return collector" ]
[ "0.6506419", "0.64478827", "0.6249575", "0.6136713", "0.6083549", "0.60691047", "0.5937353", "0.5804329", "0.5789543", "0.5710799", "0.5666293", "0.56556666", "0.5654352", "0.5652799", "0.5633741", "0.5620311", "0.5596868", "0.55945826", "0.5588704", "0.5581774", "0.5576543", "0.55601287", "0.55353713", "0.5495253", "0.54940134", "0.5493805", "0.5488754", "0.54880697", "0.5482203", "0.5480609", "0.5476581", "0.54706526", "0.54665756", "0.5457175", "0.54500777", "0.5447955", "0.54458994", "0.5442869", "0.5442835", "0.54400146", "0.54327804", "0.54314846", "0.543115", "0.5418899", "0.5393865", "0.53914666", "0.53805864", "0.5379573", "0.53787696", "0.53787696", "0.53763103", "0.536841", "0.5364883", "0.53533584", "0.5349396", "0.53475714", "0.53432274", "0.53356504", "0.53340316", "0.533204", "0.5329578", "0.5322579", "0.53221947", "0.53221524", "0.5320747", "0.5315338", "0.5315338", "0.5313349", "0.5312985", "0.5309891", "0.53010255", "0.5293643", "0.5279935", "0.5277328", "0.52767044", "0.52741086", "0.5267802", "0.5258659", "0.5256706", "0.52564234", "0.52531135", "0.5243247", "0.5241283", "0.52326834", "0.5231363", "0.5230186", "0.5226842", "0.5226019", "0.52216184", "0.5221095", "0.5212687", "0.5212504", "0.52097464", "0.5200658", "0.51927185", "0.51906025", "0.51906025", "0.5189189", "0.5186495", "0.5176474" ]
0.71581453
0
Reads the metrics from the file and records them. Derived classes must override this method to perform the actual work of collecting their specific samples.
def gather_sample(self, my_file, collector=None): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_metrics(self):\n raise NotImplementedError()", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def walk(self):\n data = open(self.data_file_path, 'rb')\n read_metric = globals()[\"ProtoDefinition\"].Payload()\n read_metric.ParseFromString(data.read())\n\n # One record for the whole file\n self.payload_metadata = read_metric.payloadMetadata\n self.device = read_metric.device\n\n # Get list of all *repeated* field types\n field_names = []\n for field_desc in read_metric.DESCRIPTOR.fields:\n field_name = field_desc.name\n\n if field_desc.label == field_desc.LABEL_REPEATED:\n field_names.append(field_name)\n\n # For each repeated field type, get the data and yield one item at a time\n for field_name in field_names:\n stream_samples = getattr(read_metric, field_name)\n for sample in stream_samples:\n yield self.device, sample", "def gather_sample(self, stat_file, collector=None):\n\n # This file format is weird. Each set of stats is outputted in two\n # lines. First, a header line that list the field names. Then a\n # a value line where each value is specified in the appropriate column.\n # You have to match the column name from the header line to determine\n # what that column's value is. Also, each pair of lines is prefixed\n # with the same name to make it clear they are tied together.\n all_lines = stat_file.readlines()\n # We will create an array of all of the column names in field_names\n # and all of the corresponding values in field_values.\n field_names = []\n field_values = []\n\n # To simplify the stats, we add together the two forms of retransmit\n # I could find in the netstats. Those to fast retransmit Reno and those\n # to selective Ack.\n retransmits = 0\n found_retransmit_metric = False\n\n # Read over lines, looking at adjacent lines. If their row names match,\n # then append their column names and values to field_names\n # and field_values. This will break if the two rows are not adjacent\n # but I do not think that happens in practice. If it does, we just\n # won't report the stats.\n for i in range(0, len(all_lines) - 1):\n names_split = all_lines[i].split()\n values_split = all_lines[i + 1].split()\n # Check the row names are the same.\n if names_split[0] == values_split[0] and len(names_split) == len(\n values_split\n ):\n field_names.extend(names_split)\n field_values.extend(values_split)\n\n if not collector:\n collector = {}\n\n # Now go back and look for the actual stats we care about.\n for i in range(0, len(field_names)):\n if field_names[i] == \"InOctets\":\n collector.update({Metric(\"app.net.bytes\", \"in\"): field_values[i]})\n elif field_names[i] == \"OutOctets\":\n collector.update({Metric(\"app.net.bytes\", \"out\"): field_values[i]})\n elif field_names[i] == \"TCPRenoRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n elif field_names[i] == \"TCPSackRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n\n # If we found both forms of retransmit, add them up.\n if found_retransmit_metric:\n collector.update({Metric(\"app.net.tcp_retransmits\", None): retransmits})\n return collector", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. 
The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n # File format is single value per line with \"fieldname:\" prefix.\n for x in stat_file:\n fields = x.split()\n if len(fields) == 0:\n continue\n if not collector:\n collector = {}\n if fields[0] == \"rchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"read\"): int(fields[1])})\n elif fields[0] == \"syscr:\":\n collector.update({Metric(\"app.disk.requests\", \"read\"): int(fields[1])})\n elif fields[0] == \"wchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"write\"): int(fields[1])})\n elif fields[0] == \"syscw:\":\n collector.update({Metric(\"app.disk.requests\", \"write\"): int(fields[1])})\n return collector", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about.\n m = re.search(r\"^(\\w+):\\s*(\\d+)\", line)\n if m is None:\n continue\n\n field_name = m.group(1)\n int_value = int(m.group(2))\n # FDSize is not the same as the number of open file descriptors. 
Disable\n # for now.\n # if field_name == \"FDSize\":\n # self.print_sample(\"app.fd\", int_value)\n if field_name == \"VmSize\":\n collector.update({Metric(\"app.mem.bytes\", \"vmsize\"): int_value * 1024})\n elif field_name == \"VmPeak\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_vmsize\"): int_value * 1024}\n )\n elif field_name == \"VmRSS\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"resident\"): int_value * 1024}\n )\n elif field_name == \"VmHWM\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_resident\"): int_value * 1024}\n )\n return collector", "def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n sensor_id = self.__sensor.get_sensor_id()\n data = self.__sensor.retrieve_data_string() \n if DEBUG:\n print('data: \"{}\"'.format(data),\n file = sys.stderr, flush=True)\n when = datetime.datetime.now(datetime.timezone.utc).isoformat()\n result = OUTPUT_FORMAT.format(when,\n sensor_name, \n sensor_id, \n data)\n output.write(result)\n output.flush()\n \n next_sample_time = next_sample_time + self.__interval\n delay_time = next_sample_time - time.time()\n if DEBUG:\n print('delay_time = {}'.format(delay_time),\n file=sys.stderr, flush=True)\n \n if 0 < delay_time: # don't sleep if already next sample time\n time.sleep(delay_time)", "def read(self):\n self.record_d = {}\n if self.__read_file():\n self.__print_report()", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # We just look for the different \"inuse\" lines and output their\n # socket type along with the count.\n m = re.search(r\"(\\w+): inuse (\\d+)\", line)\n if m is not None:\n collector.update(\n {\n Metric(\"app.net.sockets_in_use\", m.group(1).lower()): int(\n m.group(2)\n )\n }\n )\n return collector", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def record_metrics(metrics, args):\n with open('interpretation_metrics/model_metrics_{}'.format(args.file_num), 'a') as f:\n f.write(\"META DATA\\n\")\n f.write(\"---------\\n\")\n f.write(\"Model Name: {}\\n\".format(args.model_name))\n f.write(\"Attack Target: {}\\n\".format(args.attack_target))\n f.write(\"Gradient Model File: {}\\n\".format(args.gradient_model_file))\n f.write(\"Predictive Model File: {}\\n\".format(args.predictive_model_file))\n f.write(\"Cuda: {}\\n\".format(args.cuda))\n\n f.write(\"\\nSIMPLE GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSIMPLE GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n 
f.write(\"\\nSMOOTH GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)", "def run_resample(self):\n\n self.in_data.open()\n self.out_data.open()\n\n try:\n # Get the fields from the input file and set them/write headers in output:\n self.all_fields = self.in_data.fields\n\n self.out_data.set_fields(self.all_fields)\n self.out_data.write_headers()\n\n # Set up the sensor fields by removing non-sensor fields:\n self.set_sensor_only_fields()\n\n # Read the first event from the input file:\n self.get_next_input_event()\n\n # Warn and exit if we have no input data to read:\n if self.next_input_event is None:\n msg = f\"The input file {self.in_file} did not have any data rows\"\n warn(msg)\n\n return\n\n self.first_event_stamp = self.next_input_event[self.stamp_field]\n\n # Set up the sample tracking (here mostly to set the start of the first interval):\n self.reset_sample_tracking()\n\n # Now iterate through the output intervals:\n while True:\n self.process_next_interval()\n except EOFError: # catch when we are at the end of the file\n pass\n finally:\n self.in_data.close()\n self.out_data.close()\n\n print() # make sure we go to a new output line", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def parse_metrics_file(self) -> Dict[int, dict]:\n LOG.info(\"Parsing Dragen demultiplexing adapter metrics file %s\", self.adapter_metrics_path)\n parsed_metrics = {}\n\n with self.adapter_metrics_path.open(\"r\") as metrics_file:\n metrics_reader = csv.DictReader(metrics_file)\n for row in metrics_reader:\n lane = int(row[\"Lane\"])\n read_number = row[\"ReadNumber\"]\n sample_id = row[\"Sample_ID\"]\n parsed_metrics[lane] = parsed_metrics.get(lane, {})\n parsed_metrics[lane][(read_number, sample_id)] = row\n\n return self.summerize_adapter_metrics(parsed_metrics=parsed_metrics)", "def parse_file(self):\n # the header was already read in the init, start at the first sample line\n\n for line in self._stream_handle:\n\n # create the dictionary of key/value pairs composed of the labels and the values from the\n # record being parsed\n # ex: data_dict = {'sci_bsipar_temp':10.67, n1, n2, nn}\n data_dict = self._read_data(line)\n\n if GliderParser._has_science_data(data_dict, self._particle_class):\n # create the timestamp\n timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))\n # 
create the particle\n self._record_buffer.append(self._extract_sample(\n self._particle_class, None, data_dict, internal_timestamp=timestamp))", "def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []", "def sample(self):\n logger.info(\"%s: collect sensor data\", self.__class__.__name__)\n samples = []\n self._fetch_data(samples)\n return samples", "def read_data(self):\n self.data = reduce_spectrum(self.filename)", "def collect_metrics(grouped_samples, projroot, tgtdir, ext, grouping=\"sample\"):\n metrics = []\n for item_id, itemlist in grouped_samples.items():\n item = itemlist[0]\n # FIXME: tgtdir should be docroot!\n pfx = os.path.relpath(itemlist[0].prefix(grouping), os.path.dirname(tgtdir))\n mfile = glob.glob(pfx + \".*\" + ext)\n if mfile:\n metrics.append((item_id, mfile[0]))\n return PicardMetricsCollection(metrics)", "def train(self, counts_file):\n for l in read_counts(counts_file):\n n, count_type, args = int(l[0]), l[1], l[2:]\n if count_type == 'WORDTAG': # emission counts\n self.emission_counts[tuple(args)] = n\n else: # ngram counts\n self.ngram_counts[len(args) - 1][tuple(args)] = n", "def compute_metrics(self):\n pass", "def readFile(self, fname):\r\n self.scores = []\r\n self.fname = fname\r\n try:\r\n with open(fname, 'r') as f:\r\n for line in f:\r\n self.appendScore(line.split(' '))\r\n except:\r\n pass", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'Gauge':\n self.read_metgauge(line, f)\n elif self.cleantag(line) == 'ObservationData':\n self.read_obsgauge(line, f)\n # Next line\n line = f.nexttag()", "def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")", "def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)", "def metrics(self, metrics):\n\n self._metrics = metrics", "def parse_file(self):\n # read the first line in the file\n line = self._stream_handle.readline()\n\n while line:\n # check for a data line or a dcl logger line we specifically ignore\n data_match = DATA_LINE_MATCHER.match(line)\n ignore_match = IGNORE_LINE_MATCHER.match(line)\n\n if data_match:\n # found a data line, extract this particle\n # DCL controller timestamp is the port_timestamp\n dcl_controller_timestamp = data_match.groups()[DCL_TIMESTAMP_GROUP]\n port_timestamp = dcl_time_to_ntp(dcl_controller_timestamp)\n\n particle = self._extract_sample(self.particle_class,\n 
None,\n data_match,\n port_timestamp=port_timestamp,\n preferred_ts=DataParticleKey.PORT_TIMESTAMP)\n\n self._record_buffer.append(particle)\n\n elif not ignore_match:\n # we found a line with an unknown format, call an exception\n error_message = 'Found line with unknown format %s' % line\n log.warn(error_message)\n self._exception_callback(SampleException(error_message))\n\n # read the next line\n line = self._stream_handle.readline()", "def __init__(self, file_name: str):\n self.case_metrics = []\n self.cluster_metrics = []\n self.file_name = file_name\n\n self.path_to_pmg_metrics = f'metrics/{file_name}_process_model_graphs'\n self.path_to_pmg_vis = f'visualization/{file_name}_process_model_graphs'\n self.path_to_drifts = 'visualization/drifts'\n self.path_to_case_metrics = 'metrics/case_metrics'\n self.path_to_cluster_metrics = 'metrics/cluster_metrics'\n try:\n makedirs(self.path_to_pmg_metrics, exist_ok=True)\n makedirs(self.path_to_pmg_vis, exist_ok=True)\n makedirs(self.path_to_drifts, exist_ok=True)\n makedirs(self.path_to_case_metrics, exist_ok=True)\n makedirs(self.path_to_cluster_metrics, exist_ok=True)\n\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'case',\n 'graph distance', 'time distance', 'label']) \\\n .to_csv(f'{self.path_to_case_metrics}/{file_name}.csv', index=False)\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'cluster id',\n 'x', 'y', 'radius', 'weight', 'cluster type']) \\\n .to_csv(f'{self.path_to_cluster_metrics}/{file_name}.csv', index=False)\n except Exception as e:\n print(e)", "def possible_metrics(filename):\n\n metrics = {}\n\n raw_data = parse_config(filename)\n\n for graph in raw_data:\n for metric in graph['metrics']:\n metrics.update({metric['label']: [(metric['x_stream'], metric['y_stream'], metric['z_stream']), metric['func']]})\n\n return metrics", "def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()", "def populate_list(self, input_filename):\r\n f = open(input_filename, 'r')\r\n\r\n for line in f:\r\n # Process the input line\r\n line_split = line.strip().split('(')\r\n line_split[-1] = line_split[-1][:-1] # Removes the extra bracket at the end\r\n\r\n class_name = line_split[0]\r\n parameters = line_split[1].split(',')\r\n self.metrics.append(self.instantiate_class(class_name, *parameters))\r\n\r\n f.close()", "def parse_file(self):\n file_time = ''\n num_dir = 0\n num_freq = 0\n freq_w_band = 0.0\n freq_0 = 0.0\n start_dir = 0.0\n\n dspec_matrix = []\n\n # Extract the file time from the file name\n input_file_name = self._stream_handle.name\n\n match = FILE_NAME_MATCHER.match(input_file_name)\n\n if match:\n file_time = match.group(1)\n else:\n error_message = 'Unable to extract file time from DSpec input file name: %s '\\\n % input_file_name\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the first line in the file\n line = 
self._stream_handle.readline()\n\n # loop over all lines in the data file\n while line:\n\n if EMPTY_LINE_MATCHER.match(line):\n # ignore blank lines, do nothing\n pass\n\n elif HEADER_MATCHER.match(line):\n\n # we need header records to extract useful information\n for matcher in HEADER_MATCHER_LIST:\n header_match = matcher.match(line)\n\n if header_match is not None:\n\n # Look for specific header lines and extract header fields\n if matcher is DIR_FREQ_MATCHER:\n num_dir = int(header_match.group(1))\n num_freq = int(header_match.group(2))\n\n elif matcher is FREQ_BAND_MATCHER:\n freq_w_band = header_match.group(1)\n freq_0 = header_match.group(2)\n\n elif matcher is START_DIR_MATCHER:\n start_dir = header_match.group(1)\n\n else:\n #ignore\n pass\n\n elif DSPEC_DATA_MATCHER.match(line):\n\n # Extract a row of the Directional Surface Spectrum matrix\n sensor_match = DSPEC_DATA_MATCHER.match(line)\n data = sensor_match.group(1)\n values = [int(x) for x in data.split()]\n\n num_values = len(values)\n\n # If the number of values in a line of data doesn't match num_dir,\n # Drop the record, throw a recoverable exception and continue parsing\n if num_values != num_dir:\n error_message = 'Unexpected Number of directions in line: expected %s, got %s'\\\n % (num_dir, num_values)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n else:\n # Add the row to the dspec matrix\n dspec_matrix.append(values)\n\n else:\n # Generate a warning for unknown data\n error_message = 'Unexpected data found in line %s' % line\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the next line in the file\n line = self._stream_handle.readline()\n\n # Check to see if the specified number of frequencies were retrieved from the data\n dspec_matrix_length = len(dspec_matrix)\n if dspec_matrix_length != num_freq:\n error_message = 'Unexpected Number of frequencies in DSpec Matrix: expected %s, got %s'\\\n % (num_freq, dspec_matrix_length)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # Construct the parsed data list to hand over to the Data Particle class for particle creation\n parsed_data = [\n file_time, # ('file_time', 0, str),\n num_dir, # ('num_dir', 1, int),\n num_freq, # ('num_freq', 2, int),\n freq_w_band, # ('freq_w_band', 3, float),\n freq_0, # ('freq_0', 4, float),\n start_dir, # ('start_dir', 5, float),\n dspec_matrix # ('directional_surface_spectrum', 6, list)]\n ]\n\n # Extract a particle and append it to the record buffer\n particle = self._extract_sample(AdcptMDspecInstrumentDataParticle, None, parsed_data)\n self._record_buffer.append(particle)", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n 
self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def _load(self):\n op_type_file_path = os.path.join(\n self._profiling_dir,\n self._csv_file_to_analyse.format(self._device_id)\n )\n op_type_file_path = validate_and_normalize_path(\n op_type_file_path, raise_key=\"Invalid op_type_file_path\")\n if not os.path.isfile(op_type_file_path):\n log.warning('The file <%s> does not exist.', op_type_file_path)\n return\n\n with open(op_type_file_path, 'r') as file:\n csv_reader = csv.reader(file)\n _ = next(csv_reader)\n for info in csv_reader:\n self._data.append(self._convert_field_type(info))", "def samples(self):\n pass", "def calculate_batch_metrics(self):\n pass", "def make_metrics(self):\n num_batches = self.data_loader.number_of_batches()\n dose_score_vec = np.zeros(num_batches)\n\n # Only make calculations if data_loader is not empty\n if not self.data_loader.file_paths_list:\n print('No patient information was given 
to calculate metrics')\n else:\n # Change batch size to 1\n self.data_loader.batch_size = 1 # Loads data related to ground truth patient information\n if self.dose_loader is not None:\n self.dose_loader.batch_size = 1 # Loads data related to ground truth patient information\n\n for idx in tqdm.tqdm(range(num_batches)):\n # Get roi masks for patient\n self.get_constant_patient_features(idx)\n # Get dose tensors for reference dose and evaluate criteria\n reference_dose = self.get_patient_dose_tensor(self.data_loader)\n if reference_dose is not None:\n self.reference_dose_metric_df = self.calculate_metrics(self.reference_dose_metric_df, reference_dose)\n # If a dose loader was provided, calculate the score\n if self.dose_loader is not None:\n new_dose = self.get_patient_dose_tensor(self.dose_loader)\n # Make metric data frames\n self.new_dose_metric_df = self.calculate_metrics(self.new_dose_metric_df, new_dose)\n # Evaluate mean absolute error of 3D dose\n dose_score_vec[idx] = np.sum(np.abs(reference_dose - new_dose)) / np.sum(self.possible_dose_mask)\n # Save metrics at the patient level (this is a template for how DVH stream participants could save\n # their files\n # self.dose_metric_df.loc[self.patient_list[0]].to_csv('{}.csv'.format(self.patient_list[0]))\n\n if self.dose_loader is not None:\n dvh_score = np.nanmean(np.abs(self.reference_dose_metric_df - self.new_dose_metric_df).values)\n dose_score = dose_score_vec.mean()\n return dvh_score, dose_score\n else:\n print('No new dose provided. Metrics were only calculated for the provided dose.')", "def set_metrics(self):", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def calculate_dataset_metrics(self):\n pass", "def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics", "def _read_samples(self):\n\n logging.debug(\"Start file parsing.\")\n data = pd.read_csv(self._source_file, header=None)\n \n data = pd.read_csv(self._source_file, header=None)\n header = pd.read_csv(self._header_file, delimiter=':', skiprows=1, header=None)\n header.columns = ['column', 'column_type']\n\n data.columns = header.column.tolist() + ['attack']\n data['attack'] = data['attack'].str.replace('.', '')\n data['label'] = 1\n data.loc[data['attack'] == 'normal', 'label'] = 0\n\n symbolic_columns = header.loc[header.column_type == ' symbolic.'].column.tolist()\n # print(symbolic_columns)\n\n for scol in symbolic_columns:\n data[scol] = pd.Categorical(data[scol])\n one_hot_cols = pd.get_dummies(data[scol], prefix=scol)\n data = pd.concat([data, one_hot_cols], axis=1)\n\n data = data.drop(columns=symbolic_columns)\n data = 
data.drop(columns=['attack'])\n\n # data.loc[data.attack != 'normal' , ['attack', 'label']].head(20)\n\n data_normal = data.loc[data['label'] == 0]\n data_abnormal = data.loc[data['label'] == 1]\n\n data_normal_train = data_normal.sample(frac=0.7)\n data_normal_test = data_normal.loc[~data_normal.index.isin(data_normal_train.index)]\n\n data_normal_train = data_normal_train.drop(columns=['label']).values\n data_normal_test = data_normal_test.drop(columns=['label']).values\n data_abnormal = data_abnormal.drop(columns=['label']).values\n \n scaler = MinMaxScaler()\n _ = scaler.fit(data_normal_train)\n data_normal_train = scaler.transform(data_normal_train)\n data_normal_test = scaler.transform(data_normal_test)\n data_abnormal = scaler.transform(data_abnormal)\n \n logging.debug('Normal {}; Train {}; Test{}'.format(data_normal.shape, data_normal_train.shape, data_normal_test.shape))\n logging.debug('Abnormal {}'.format(data_abnormal.shape))\n\n samples = {}\n samples['NORMAL'] = data_normal_train\n samples['NORMAL_TEST'] = data_normal_test\n samples['ABNORMAL_TEST'] = data_abnormal\n\n logging.debug(\"End file parsing.\")\n\n return samples", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def __parse(self):\n lines = self.file.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n tokens = line.split()\n if tokens[0] == \"#start\":\n trial_name = tokens[1]\n trial = Trial(trial_name)\n self.trials[trial_name] = trial\n elif tokens[0] == \"#end\":\n continue\n else:\n date_str = tokens[0] + \" \" + tokens[1]\n date = datetime.strptime(date_str, \"%m/%d/%y %H:%M:%S\")\n sound_file = line[18:-1].strip()\n event = Event(date, sound_file, 0)\n trial.addevent(event)", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def load_data(self):\n logging.debug('Loading data from file ({})...'.format(self.file_name))\n parsed_data = list()\n with open(self.file_name) as file_data:\n for line in file_data.readlines():\n temp = dict()\n if 'JD' in line:\n continue\n line = line.split()\n temp['ts'], temp['mag'], temp['dif'] = float(line[0][:14]), float(line[1]), float(line[2])\n temp['f_mag'] = self.kalman_filter(temp['mag'])\n temp['dt'] = self.jd_to_datetime(temp['ts'])\n temp['dt_cor'] = self.jd_to_datetime(temp['ts'] - TIME_CRT)\n parsed_data.append(temp)\n logging.debug(' {} records loaded.'.format(len(parsed_data)))\n logging.debug(parsed_data[0])\n self.data_stream = parsed_data", "def _read_log(self):\n\n line_regex = compile(r\"\\[I\\]\\s*\\(\\d+ms\\)[^\\d]+(?P<counter>\\d+)\"\n r\"[^\\d]+(?P<timestamp>\\d+(\\.\\d+)?)[^\\d]+\"\n r\"(?P<acceleration>\\d+);\")\n values = []\n with open(self.filepath) as file:\n for line in file:\n match = line_regex.match(line)\n if match:\n values.append({\n 'counter':\n int(match['counter']),\n 'timestamp':\n int(float(match['timestamp']) * 1000),\n 'acceleration':\n int(match['acceleration'])\n })\n\n self.values = values", "def write_training_metrics(self) -> None:\n self.trainer_metrics.write_training_metrics()", "def getUnscaledSamples(self, **kwargs) -> TimeData:\n # initialise chans, startSample and endSample with the whole dataset\n options = self.parseGetDataKeywords(kwargs)\n\n # get the files to read and the samples to take from them, in the correct order\n dataFilesToRead, samplesToRead, scalings = self.getDataFilesForSamples(\n options[\"startSample\"], options[\"endSample\"]\n )\n numSamples = options[\"endSample\"] 
- options[\"startSample\"] + 1\n # set up the dictionary to hold the data\n data = {}\n for chan in options[\"chans\"]:\n data[chan] = np.zeros(shape=(numSamples), dtype=self.dtype)\n\n # loop through chans and get data\n sampleCounter = 0\n for dFile, sToRead, scalar in zip(dataFilesToRead, samplesToRead, scalings):\n # get samples - this is inclusive\n dSamples = sToRead[1] - sToRead[0] + 1\n # spam files always record 5 channels\n dSamplesRead = dSamples * self.recChannels[dFile]\n # read the data\n byteOff = (\n self.dataByteOffset[dFile]\n + sToRead[0] * self.recChannels[dFile] * self.dataByteSize\n )\n dFilePath = os.path.join(self.dataPath, dFile)\n dataRead = np.memmap(\n dFilePath,\n dtype=self.dtype,\n mode=\"r\",\n offset=byteOff,\n shape=(dSamplesRead),\n )\n # now need to unpack this\n for chan in options[\"chans\"]:\n # check to make sure channel exists\n self.checkChan(chan)\n # get the channel index - the chanIndex should give the right order in the data file\n # as it is the same order as in the header file\n chanIndex = self.chanMap[chan]\n # use the range sampleCounter -> sampleCounter + dSamples, because this actually means sampleCounter + dSamples - 1 as python ranges are not inclusive of the end value\n # scale by the lsb scalar here - note that these can be different for each file in the run\n data[chan][sampleCounter : sampleCounter + dSamples] = (\n dataRead[chanIndex : dSamplesRead : self.recChannels[dFile]]\n * scalar[chan]\n )\n # increment sample counter\n sampleCounter = sampleCounter + dSamples # get ready for the next data read\n\n # return data\n startTime, stopTime = self.sample2time(\n options[\"startSample\"], options[\"endSample\"]\n )\n comments = []\n comments.append(\n \"Unscaled data {} to {} read in from measurement {}, samples {} to {}\".format(\n startTime,\n stopTime,\n self.dataPath,\n options[\"startSample\"],\n options[\"endSample\"],\n )\n )\n comments.append(\"Data read from {} files in total\".format(len(dataFilesToRead)))\n comments.append(\n \"Data scaled to mV for all channels using scalings in header files\"\n )\n comments.append(\"Sampling frequency {}\".format(self.getSampleFreq()))\n return TimeData(\n sampleFreq=self.getSampleFreq(),\n startTime=startTime,\n stopTime=stopTime,\n data=data,\n comments=comments,\n )", "def save_case_metrics_on_check_point(self) -> None:\n pd.read_csv(f'{self.path_to_case_metrics}/{self.file_name}.csv')\\\n .append(pd.DataFrame(self.case_metrics,\n columns=['stream_index', 'timestamp', 'check point', 'case',\n 'graph distance', 'time distance', 'label']))\\\n .to_csv(f'{self.path_to_case_metrics}/{self.file_name}.csv', index=False)\n self.case_metrics = []", "def read_file(self):\n # This is quite ugly but works for now.\n self.header = read_csv(self.file_name, delim_whitespace=True,\n header=TrackData.header_line,\n nrows=1).to_dict(orient='index')[0]\n self.data = read_csv(self.file_name, delim_whitespace=True, \n header=TrackData.data_line)", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n 
self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def process_file(self, file_path: str) -> None:\n signal = self._loader(file_path)\n if self._is_padding_necessary(signal):\n signal = self._apply_padding(signal)\n feature = self._logspectogramextractor(signal)\n norm_feature = self._normalize(feature)\n if self._no_nan(norm_feature):\n file_name = self._save_feature(norm_feature, file_path)\n self._store_min_max_value(file_name, feature.min(), feature.max())", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def read_callback(data=None):\n\n hits_by_domain = get_hits_by_domain()\n\n if not hits_by_domain:\n collectd.info('hits_by_domain not collected successfully')\n pass\n else:\n for key in hits_by_domain:\n metric = collectd.Values()\n metric.plugin = 'hits_by_domain'\n metric.type = 'count'\n metric.type_instance = key\n metric.values = [hits_by_domain[key]]\n metric.dispatch()", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n for key in value_dict.keys():\n value_dict[key] *= self.coverage\n value_dict['coverage'] = self.coverage\n logging.info(\"coverage:{0}\".format(self.coverage))\n for key, value in value_dict.items():\n logging.info(\"{0}:{1}\".format(key,value))\n self.add_records(self.main_loop.log, value_dict.items())\n self.check_stop(value_dict)\n logger.info(\"Monitoring on auxiliary data finished\")", "def test_get_all_derived_metrics(self):\n pass", "def read_experiment_metrics(TRAIN_CONFIGS):\n metric_dir = TRAIN_CONFIGS.get(\"metrics_dir\")\n mpre = _metric_file_prefixes(TRAIN_CONFIGS)\n\n mtrain = pd.concat([read_metric_frame(f, train=True, metric_dir=metric_dir) for f in mpre],axis=1)\n mval = pd.concat([read_metric_frame(f, train=False, metric_dir=metric_dir) for f in mpre],axis=1)\n\n mtrain = mtrain.sort_index(axis=1,level=0)\n mval = mval.sort_index(axis=1,level=0)\n\n return mtrain, mval", "def 
read_file(self,fname):\n try:\n self.raw=spiketrain.read_file(fname)\n except Exception:\n self.raw=None\n raise", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0", "def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)", "def run(self):\r\n self.collect_data()", "def list_metrics(self):\n pass", "def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)", "def parse(self):\n\n # if data has already been parsed, do nothing\n if self._data:\n return\n\n stats = {\n \"genres\": {},\n \"artists\": {},\n \"global_stats\": {\n \"songs\": 0,\n \"lines\": 0,\n \"words\": 0\n }\n }\n try:\n with open(self._filename) as file:\n objects = ijson.items(file, \"item\")\n\n # compute metrics\n for object in objects:\n\n lines = len(object[\"lyrics\"])\n words = sum([len(line.split()) for line in object[\"lyrics\"]])\n\n genre = object[\"genre\"]\n stats[\"genres\"][genre] = stats[\"genres\"].get(genre, {\"artists\": {}})\n genre_obj = stats[\"genres\"][genre]\n genre_obj[\"songs\"] = genre_obj.get(\"songs\", 0) + 1\n genre_obj[\"lines\"] = genre_obj.get(\"lines\", 0) + lines\n genre_obj[\"words\"] = genre_obj.get(\"words\", 0) + words\n genre_obj[\"is_music\"] = genre_obj.get(\"is_music\", 0)\n if object[\"is_music\"] != \"false\":\n genre_obj[\"is_music\"] += 1\n\n artist = object[\"artist\"]\n stats[\"artists\"][artist] = stats[\"artists\"].get(artist, 0) + 1\n stats[\"genres\"][genre][\"artists\"][artist] = stats[\"genres\"][genre][\"artists\"].get(artist, 0) + 1\n\n # update global stats\n stats[\"global_stats\"][\"songs\"] += 1\n stats[\"global_stats\"][\"lines\"] += lines\n stats[\"global_stats\"][\"words\"] += words\n\n # calculate averages for each genre\n for genre, genre_stats in stats[\"genres\"].items():\n genre_stats[\"avg_line_length\"] = genre_stats[\"words\"] / genre_stats[\"lines\"]\n genre_stats[\"avg_lines\"] = genre_stats[\"lines\"] / genre_stats[\"songs\"]\n genre_stats[\"avg_words\"] = genre_stats[\"words\"] / genre_stats[\"songs\"]\n\n # calculate global averages\n stats[\"global_stats\"][\"avg_line_length\"] = stats[\"global_stats\"][\"words\"] / stats[\"global_stats\"][\"lines\"]\n stats[\"global_stats\"][\"avg_lines\"] = stats[\"global_stats\"][\"lines\"] / stats[\"global_stats\"][\"songs\"]\n stats[\"global_stats\"][\"avg_words\"] = stats[\"global_stats\"][\"words\"] / stats[\"global_stats\"][\"songs\"]\n\n self._data = stats\n\n except IOError as e:\n print(\"Exception occurred: \", e)", "def __read(self, filename):\n f = open(filename)\n\n 
self.startDate = self.__parseDate(f.readline())\n (nRows, nCols) = [int(s) for s in f.readline().split() ]\n\n dataArray = self.__readData(f, nRows, nCols)\n self.__storeDataDict(dataArray)\n self.__appendMetaData(filename)\n self._appendDerivedQuantities()", "def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def __init__(self, in_file: str, out_file: str, sample_rate: float,\n max_time_gap: timedelta):\n\n self.in_file = in_file\n self.out_file = out_file\n\n # Set up the input/output file objects:\n self.in_data = MobileData(in_file, 'r')\n self.out_data = MobileData(out_file, 'w')\n\n # Hold information about the different fields to use:\n # All fields in the input file:\n self.all_fields = None # type: Optional[Dict[str, str]]\n\n # Stamp field name:\n self.stamp_field = default_stamp_field\n\n # Only sensor fields:\n self.sensor_fields = None # type: Optional[Dict[str, str]]\n\n # List of label fields:\n self.label_fields = default_label_fields\n\n # Determine the output sample interval in seconds:\n self.sample_interval = timedelta(seconds=1.0 / sample_rate)\n\n # Maximum gap between input events - longer than this and we will restart resampling:\n self.max_time_gap = max_time_gap\n\n # The previous and next output stamps to use:\n self.prev_out_stamp = None # type: Optional[datetime]\n self.next_out_stamp = None # type: Optional[datetime]\n\n # The next event from the input file:\n self.next_input_event = None # type: Optional[Dict[str, Union[float, str, datetime, None]]]\n\n # The last-seen input event:\n self.last_seen_input_event = None # type: Optional[Dict[str, Union[float, str, datetime, None]]]\n\n # Information about input events seen in a sample interval:\n self.num_events_in_interval = 0\n self.interval_sensor_values = None # type: Optional[Dict[str, List[float, str]]]\n self.interval_labels = None # type: Optional[Dict[str, List[str]]]\n\n # Status update info:\n self.status_num_events_interval = status_num_events_interval\n self.num_input_events_processed = 0\n self.num_events_since_last_status = 0\n self.first_event_stamp = None # type: Optional[datetime]", "def __init__(self, filepath):\n\n self.filepath = Path(filepath)\n\n # Store log data in line based format\n self.values = None\n self._read_log()\n\n # Store log data in row based format\n self.counters = []\n self.timestamps = []\n self.acceleration = []\n self._store_rows()", "def 
compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def run_single_cycle(self, collector=None):\n\n self._timestamp = int(time.time())\n\n # There are certain error conditions, such as the system not supporting\n # a particular proc file type, that we will never recover from. So,\n # just always early exit.\n if self._failed:\n return {}\n\n filename = self._file_pattern % self._pid\n\n if not collector:\n collector = {}\n if self._file is None:\n try:\n self._file = open(filename, \"r\")\n except IOError as e:\n # We take a simple approach. If we don't find the file or\n # don't have permissions for it, then just don't collect this\n # stat from now on. If the user changes the configuration file\n # we will try again to read the file then.\n self._failed = True\n if e.errno == errno.EACCES:\n self._logger.error(\n \"The agent does not have permission to read %s. \"\n \"Maybe you should run it as root.\",\n filename,\n )\n elif e.errno == errno.ENOENT:\n self._logger.error(\n (\n \"The agent cannot read %s. Your system may not support that proc file \"\n 'type or the process with pid \"%s\" doesn\\'t exist'\n ),\n filename,\n self._pid,\n )\n # Ignore 'process not found' errors (likely caused because the process exited\n # but re-raise the exception for all other errors\n elif e.errno != errno.ESRCH:\n raise e\n\n if self._file is not None:\n try:\n self._file.seek(0)\n\n return self.gather_sample(self._file, collector=collector)\n\n except IOError as e:\n # log the error if the errno isn't 'process not found'. Process not found likely means the\n # process exited, so we ignore that because it's within the realm of expected behaviour\n if e.errno != errno.ESRCH:\n self._logger.error(\n \"Error gathering sample for file: '%s'\\n\\t%s\"\n % (filename, six.text_type(e))\n )\n\n # close the file. This will cause the file to be reopened next call to run_single_cycle\n self.close()\n return collector", "def __init__(self, count_metrics=None, time_metrics=None, gauge_metrics=None, max_call_count=-1,\n max_time_between_calls=-1):\n super(InMemoryMetrics, self).__init__()\n self._count_metrics = count_metrics if count_metrics is not None else defaultdict(int)\n self._time_metrics = time_metrics if time_metrics is not None \\\n else defaultdict(LatencyTracker)\n self._gauge_metrics = gauge_metrics if gauge_metrics is not None else defaultdict(float)\n self._max_call_count = max_call_count\n self._max_time_between_calls = max_time_between_calls\n\n utcnow_timestamp = arrow.utcnow().timestamp\n\n self._count_call_count = 0\n self._count_last_call_time = utcnow_timestamp\n self._time_call_count = 0\n self._time_last_call_time = utcnow_timestamp\n self._gauge_call_count = 0\n self._gauge_last_call_time = utcnow_timestamp\n self._count_rlock = RLock()\n self._time_rlock = RLock()\n self._gauge_rlock = RLock()\n self._ignore_metrics = False", "def _log_metrics(self, logs, prefix, step):\r\n if logs is None:\r\n logs = {}\r\n\r\n # Group metrics by the name of their associated file writer. 
Values\r\n # are lists of metrics, as (name, scalar_value) pairs.\r\n logs_by_writer = {\r\n self._train_run_name: [],\r\n self._validation_run_name: [],\r\n }\r\n validation_prefix = 'val_'\r\n for (name, value) in logs.items():\r\n if name in ('batch', 'size', 'num_steps'):\r\n # Scrub non-metric items.\r\n continue\r\n if name.startswith(validation_prefix):\r\n name = name[len(validation_prefix):]\r\n writer_name = self._validation_run_name\r\n else:\r\n writer_name = self._train_run_name\r\n name = prefix + name # assign batch or epoch prefix\r\n logs_by_writer[writer_name].append((name, value))\r\n\r\n with context.eager_mode():\r\n with summary_ops_v2.always_record_summaries():\r\n for writer_name in logs_by_writer:\r\n these_logs = logs_by_writer[writer_name]\r\n if not these_logs:\r\n # Don't create a \"validation\" events file if we don't\r\n # actually have any validation data.\r\n continue\r\n writer = self._get_writer(writer_name)\r\n with writer.as_default():\r\n for (name, value) in these_logs:\r\n summary_ops_v2.scalar(name, value, step=step)", "def loadFeeds(self):\n\n metrics = self.config['metrics']\n for metric in metrics:\n metricConf = self.config['metrics'][metric]\n metricConf['name'] = metric\n source = metricConf['source']['driver']\n if 'metrics' not in self.sources[source['name']]:\n self.sources[source['name']]['metrics'] = []\n\n self.sources[source['name']]['metrics'].append(metricConf)", "def __getitem__(self, idx):\n if (idx >= len(self)) or (idx < 0):\n raise IndexError(f\"Index {idx} if out of dataset range with {len(self)} samples\")\n\n # Identify the file containing the record\n file_id = np.digitize(idx, self.midx_bins, right=False)\n base_idx = self.midx_bins[file_id - 1] if file_id > 0 else 0\n file_idx = idx - base_idx + self._header_lines\n mdata, midx = self.mdata_midx_list[file_id]\n # load sample\n if file_idx == 0:\n i = 0\n j = midx[0]\n else:\n i = midx[file_idx - 1] + 1 # ignore newline\n j = midx[file_idx]\n\n # fetch sample from memmap\n\n try:\n sample = self._fetch_sample_from_memmap(mdata, i, j)\n except Exception as e:\n logging.error(f\"Error while fetching sample from memmap: {e}\")\n logging.error(f\"file_id: {file_id}, file_idx: {file_idx}, i: {i}, j: {j}\")\n raise e\n\n # parse raw text (e.g., tokenize)\n try:\n data = self._build_data_from_text(sample)\n except Exception as e:\n logging.error(\n f\"Error while building data from text, possible issue with sample expected format (see offending sample below): {e}\"\n )\n logging.error(f\"sample: {sample}, file_id: {file_id}, file_idx: {file_idx}, i: {i}, j: {j}\")\n raise e\n\n return data", "def metrics_group():", "def __init__(self, included_metrics: List[str]):\n self.included_metrics = included_metrics\n self.metrics = self._initialize_metrics()", "def readData(self):\n f = open(self.filename)\n self.time = []\n self.data = []\n for line in f:\n if line.find('BAD FLAG') > 0:\n self.badValue = float(line.split(':')[1].strip())\n if line.find('LONGITUDE') > 0:\n self.lon = line.split(':')[1].strip()\n if line.find('LATITUDE') > 0:\n self.lat = line.split(':')[1].strip()\n if len(line) > 6 and line[2] == '-' and line[6] == '-':\n parts = line.rsplit(None, 1)\n # data line\n timeStamp = datetime.datetime.strptime(parts[0], '%d-%b-%Y %H')\n t = timeArray.datetimeToEpochTime(timeStamp)\n self.time.append(t)\n val = float(parts[1])\n self.data.append(val)\n\n self.time = np.array(self.time)\n self.data = np.array(self.data)\n # remove bad values\n if self.badValue:\n goodIx = 
self.data != self.badValue\n self.time = self.time[goodIx]\n self.data = self.data[goodIx]\n self.fileIsRead = True", "def collect_data(base='.'):\n files = glob(join(base, 'accuracy_*.json'))\n data = {}\n for file in files:\n fields = basename(file).split('_')\n fields = fields[1:-2]\n accuracy = load(open(file))\n weights = load(open(file.replace('accuracy', 'weights')))\n for model in accuracy.keys():\n # add both accuracy and weights\n key = tuple(fields + [model])\n value = accuracy[model] + weights[model]\n data[key] = value\n return data", "def read_raw_data(self):\n dat_file = os.path.join(DATA_DIR, self.patient_number + '.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(dat_file))\n time = []\n voltage1 = []\n voltage2 = []\n with open(dat_file, 'r') as fd:\n for line in fd:\n line = line.split()\n time.append(line[0])\n voltage1.append(float(line[1]))\n voltage2.append(float(line[2]))\n\n tags_file = os.path.join(DATA_DIR, self.patient_number + '_tag.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(tags_file))\n tags_time = []\n tags = []\n r_peaks_indexes = []\n with open(tags_file, 'r') as fd:\n for line in fd:\n line = line.split()\n tags_time.append(line[0])\n tags.append(line[2])\n r_peaks_indexes.append(int(line[1]))\n return time, voltage1, voltage2, tags_time, tags, r_peaks_indexes", "def __init__(self):\n\n # sample must be between 0 and 1\n if self.sample <= 0 or self.sample > 1:\n raise Exception('sample {} should be > 0 and <= 1'.format(self.sample))\n\n # sample RDD if sample is specified AND rdd has not been pre-sampled\n if self.sample < 1 and not self.pre_sampled:\n self.rdd = self.rdd.sample(False, self.sample, self.seed)\n\n # Assign each RDD with counter. 
Reduce and collect.\n collectedCounts = self.rdd.reduceByKey(lambda x,y: x+y) \\\n .collect() # (id, count), number of times that count appears)\n\n # function that re-calculates coverage based on sampling\n approximateCounts = lambda counts, sample: int(counts * 1.0/sample)\n\n # restructure each record so record structure is (key: sampleId, value: (coverage, count))\n x = list(map(lambda x: (x[0][0], (x[0][1], approximateCounts(x[1], self.sample))), collectedCounts))\n\n # create dictionary where keys are the sampleId\n self.collectedCounts = collections.defaultdict(set)\n for k, v in x:\n self.collectedCounts[k].add(v)", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n logger.info(\"Monitoring on auxiliary data finished\")", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n logger.info(\"Monitoring on auxiliary data finished\")", "def get_accumulated_data(self, topic, start_time, end_time, units):\n ignore_start_time = self._get_value('ignore_start_time', topic)\n ignore_end_time = self._get_value('ignore_end_time', topic)\n adjust_start_time = self._get_value('adjust_start_time', topic)\n adjust_end_time = self._get_value('adjust_end_time', topic)\n\n if ignore_start_time:\n self.logger.debug(\"Service ignoring start time.\")\n start_ts = self.peek_datetime(topic) - adjust_start_time\n else:\n start_ts = start_time - adjust_start_time\n\n if ignore_end_time:\n self.logger.debug(\"Service ignoring end time.\")\n end_ts = self.peek_last_datetime(topic) + adjust_end_time\n else:\n end_ts = end_time + adjust_end_time\n\n self.logger.debug(\"Service processing interval: %f %f\" %(start_ts, end_ts))\n accumulator = weewx.accum.Accum(weeutil.weeutil.TimeSpan(start_ts, end_ts))\n\n for data in self.get_data(topic, end_ts):\n if data:\n try:\n self.logger.debug(\"Service data to accumulate: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(data['dateTime']), to_sorted_string(data)))\n accumulator.addRecord(data)\n except weewx.accum.OutOfSpan:\n self.logger.info(\"Service ignoring record outside of interval %f %f %f %s\"\n %(start_ts, end_ts, data['dateTime'], (to_sorted_string(data))))\n else:\n break\n\n target_data = {}\n if not accumulator.isEmpty:\n aggregate_data = accumulator.getRecord()\n self.logger.debug(\"Service data prior to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(aggregate_data['dateTime']), to_sorted_string(aggregate_data)))\n target_data = weewx.units.to_std_system(aggregate_data, units)\n self.logger.debug(\"Service data after to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(target_data['dateTime']), to_sorted_string(target_data)))\n else:\n self.logger.debug(\"Dervice queue was empty\")\n\n # Force dateTime to packet's datetime so that the packet datetime is not updated to the MQTT datetime\n if ignore_end_time:\n target_data['dateTime'] = end_time\n\n return target_data", "def _update_base_stats(self, base_stats):\n self.total_samples += base_stats[\"sample_size\"]\n self.sample = base_stats[\"sample\"]\n self._empty_line_count += base_stats[\"empty_line_count\"]\n self.memory_size += base_stats[\"memory_size\"]", "def data_for_file (self):\n rowdata = 
np.array([self.Train_Time_min,self.Train_Time_max,self.Train_Time_avg,\n self.Loss_Value_min,self.Loss_Value_max,self.Loss_Value_avg,\n self.Iterations_min,self.Iterations_max,self.Iterations_avg,\n self.precision_avg,self.recall_avg])\n return rowdata\n\n\n \n\n\n #### FUNCTION DEFINTIIONS ####", "def metadata_reporter(self):\n logging.info('Creating summary report')\n header = '{}\\n'.format(','.join(self.headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 'std_insert',\n number=True)\n # AverageCoverageDepth\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add 
both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a ND for each missing result\n if len(gene_set) < 7:\n data += (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # PipelineVersion\n data += self.commit + ','\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with 
open(os.path.join(self.reportpath, 'combinedMetadata.csv'), 'w') as metadatareport:\n metadatareport.write(header)\n metadatareport.write(cleandata)", "def prepareAccumulatedMetrics(self):\n displayDF = analyzeMetricsDF(self.resultList)\n displayDF.to_csv(\"data/results.csv\")", "def save_metrics(self):\n self.data_stats.write.format(\"org.apache.spark.sql.cassandra\").mode(\"append\").options(table=self.cassandra_stats_table, keyspace=self.cassandra_keyspace).save()\n print (\"Saved data successfully\")" ]
[ "0.6955891", "0.69391584", "0.6471244", "0.63956815", "0.6393039", "0.63644856", "0.6352858", "0.6334597", "0.6094666", "0.6007557", "0.5965609", "0.59264725", "0.58482134", "0.5830391", "0.58277", "0.5824727", "0.58074886", "0.57781744", "0.57719123", "0.5734532", "0.5728365", "0.5672178", "0.5573934", "0.55538625", "0.5544432", "0.54948115", "0.54931366", "0.5488283", "0.5481665", "0.5479812", "0.54754204", "0.54436594", "0.5442437", "0.54304355", "0.54052466", "0.5394483", "0.53756493", "0.53573525", "0.5347062", "0.5342278", "0.5322698", "0.5318811", "0.53180283", "0.5297558", "0.527176", "0.5263396", "0.52632946", "0.52606744", "0.52580595", "0.52515453", "0.52507085", "0.5238428", "0.5232012", "0.5229966", "0.52241856", "0.52235484", "0.5213215", "0.5210929", "0.52013975", "0.5198738", "0.51982284", "0.5197051", "0.51916635", "0.51876044", "0.51874673", "0.518478", "0.5171812", "0.5161165", "0.51554507", "0.5144027", "0.51420844", "0.5137179", "0.51362914", "0.51350194", "0.5134247", "0.5122583", "0.51123804", "0.5111448", "0.51091516", "0.50986826", "0.5095876", "0.5095317", "0.5093547", "0.5092727", "0.5083694", "0.50817716", "0.50790346", "0.50777644", "0.50709176", "0.50638574", "0.5061821", "0.5059077", "0.5053847", "0.5053847", "0.50531137", "0.50491977", "0.50474477", "0.5045176", "0.5041334", "0.50402117" ]
0.65097046
2
Closes any files held open by this reader.
def close(self): try: self._failed = True if self._file is not None: self._file.close() self._failed = False finally: self._file = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close( self ):\n \n for file in self._files:\n ir.file_hub.close( file )", "def close_all_files(self):\r\n while self.close_file():\r\n pass", "def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None", "def close(self):\r\n self._report_file.close()\r\n # Make sure everything's closed.\r\n for files in self._output_files.values():\r\n for f in files.values():\r\n f.close()", "def close_all_file_handles(self):\n\n if isinstance(getattr(self, \"_file_handles\", None), dict):\n for fh in self._file_handles.values():\n fh.close()\n self._file_handles.clear()", "def close_files(self):\n self.wb_alm.close()\n self.wb_defect.close()\n self.wb_enhancement.close()\n self.wb_incident.close()\n self.wb_destination.close()", "def __del__(self):\n for f in self._files:\n f.close()", "def __del__(self):\n for file in list(self.mFiles.values()):\n file.close()", "def close_file_readers(file_reader_last_read_list):\n for file_reader in file_reader_last_read_list:\n file_reader[\"file_reader\"].close()", "def _close(self):\n self.fh.close()", "def close(self):\n self.__file_object.close()", "def __del__(self):\n for handle in self._filehandles:\n handle.close()", "def close(self):\n self.file.close()\n self.file = None", "def close(self):\n if callable(getattr(self._file, 'close', None)):\n self._iterator.close()\n self._iterator = None\n self._unconsumed = None\n self.closed = True", "def _close(self):\n for fd in self._fds:\n try:\n os.close(fd)\n except:\n pass", "def close(self):\n self.__file.close()", "def close(self):\n if not self.file.closed:\n self.file.close()", "def _CloseOutputFiles(self):\n self.gfile.close()\n self.efile.close()", "def close(self):\n if self._open:\n self.grp.file.close()\n self._open = False", "def close(self):\r\n self._fp.close()", "def close(self):\n self.closed = True\n for stream in self.streams:\n stream.close()", "def Close(self):\n super(CPIOArchiveFile, self).Close()\n self._file_entries = None", "def close(self):\n if self.file is not None:\n self.file.close()\n self.file = None", "def __del__(self):\n self.close_files()", "def close(self):\n self.file.close()", "def close(self):\n self.file.close()", "def close(self):\r\n if self._filename and self._fh:\r\n self._fh.close()\r\n self._fh = None", "def close(self):\n if not self.__closed:\n self.counters = { \"error\": 0, \"warning\": 0, \"success\": 0, \"failure\": 0 }\n\n try:\n self.__flush_count = 0\n for handler in self.__filehandlers:\n handler.flush()\n self.__logger.removeHandler(handler)\n handler.close()\n except:\n # do nothing\n pass\n self.__closed = True", "def close(self):\n self._file.close()", "def close(self):\n self._file.close()", "def close_file(self):\n self.root_group.close()", "def close_file(self):\r\n self.file.close()", "def close(self):\n self.f.close()", "def close(self):\n self.f.close()", "def close(self):\n self._fp.close()", "def close(self):\n\t\tself.filep.close()", "def close(self) -> None:\n self.f.close()", "def close(self):\n self.flush()\n self.file.close()\n self.file = None", "def close(self) -> None:\n if self.file_handler:\n self.file_handler.close()", "def close(self):\n self.file_out.close()", "def close_file(self):\n self.file.close()", "def close_file(self):\n self.file.close()", "def close(self):\n self.fout.close()", "def __del__(self):\n for component_name, file in self._file_list.items():\n file.close()", "def close(self):\r\n if self._session:\r\n self._session.close()\r\n self._session = None\r\n 
try:\r\n self._writer.remove_file()\r\n self._reader.remove_file()\r\n except Oct2PyError:\r\n pass", "def close(self):\n if self.mode == \"w\":\n # Write the content index\n self.cnt.write(self.file)\n\n self.file.close()", "def close(self):\n self.fileobj.close()", "def close(self):\n self.fileobj.close()", "def close_file_handle(self):\n if self.file_handle and self.output_file:\n self.file_handle.close()", "def close(self):\n os.close(self._fd)\n self._fd = None", "def _close( self ):\n for sji in self._sji_data:\n sji.close()", "def close(self):\r\n self.rfile.close()\r\n self.sock_chan.close()", "def close(self):\n self.out_file.close()", "def close(self):\n self._progress.close()\n self._fd.close()", "def close(self):\n file = self.file\n self.file = None\n self.filename = None\n self.current_line = None\n file.close()", "def close(self):\r\n self._fd.close()", "def Close(self):\n if not self._is_open:\n raise IOError('Not opened.')\n\n if self._exe_section:\n self._wrc_stream.close()\n\n self._exe_file.close()\n self._file_object = None\n self._is_open = False", "def close(self) -> None:\n os.remove(self.FILE_NAME)", "def close(self):\n if not self._f:\n return\n\n logger.info(\"Closed {} ({})\".format(self.name, self.num))\n\n self._f.close()\n self._f = None", "def close(self):\n if self._open:\n self._open = False", "def close(self):\n os.close(self.fd)", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def close(self) -> None:\n self.pages = []\n for fo, _reader in self.inputs:\n fo.close()\n\n self.inputs = []\n self.output = None", "def Close(self): \n posix.close(self.File)", "def close_links(self):\n for uri, cf in self._cfs.items():\n cf.close_link()\n\n self._is_open = False", "def close(self):\n self.closed = True\n for cursor in self.cursors:\n try:\n cursor.close()\n except exceptions.Error:\n pass # already closed", "def close(self):\n\n self.r.close()", "def close(self, **kw) -> None:\n super().close(**kw)\n self._file.close()", "def close(self):\n if self._fd_out is not None:\n os.close(self._fd_out)\n self._fd_out = None\n if self._fd_in is not None:\n os.close(self._fd_in)\n self._fd_in = None", "def close(self) -> None:\n self._data_read_thread_exit.set()\n self._plc_raw_reader.close()", "def close(self):\n if self._writable and not self._closed:\n self._closed = True\n with self._seek_lock:\n self._flush_raw_or_buffered()\n if self._seek:\n with handle_os_exceptions():\n self._close_writable()", "def close(self):\n self.hdfile.close()", "def close(self):\n self.hdfile.close()", "def close(self):\n\n\t\tfd = self._fd\n\t\tif fd is not None:\n\t\t\tself._fd = None\n\t\t\tos.close(fd)", "def Close(self):\n if not self._is_open:\n raise IOError('Storage file already closed.')\n\n if not self._read_only:\n self.Flush()\n\n if self._serializers_profiler:\n self._serializers_profiler.Write()\n\n # Make sure to flush the caches so that zipfile can be closed and freed.\n # Otherwise on Windows the ZIP file remains locked and cannot be renamed.\n\n self._offset_tables = {}\n self._offset_tables_lfu = []\n\n self._open_streams = {}\n self._streams_lfu = []\n\n self._event_timestamp_tables = {}\n self._event_timestamp_tables_lfu = []\n\n self._zipfile.close()\n self._zipfile = None\n self._is_open = False\n\n file_renamed = False\n if self._path != self._zipfile_path and os.path.exists(self._zipfile_path):\n # On Windows the file can sometimes be still in use and we have to wait.\n for attempt in range(1, self._MAXIMUM_NUMBER_OF_LOCKED_FILE_ATTEMPTS):\n 
try:\n os.rename(self._zipfile_path, self._path)\n file_renamed = True\n break\n\n except OSError:\n if attempt == self._MAXIMUM_NUMBER_OF_LOCKED_FILE_ATTEMPTS:\n raise\n time.sleep(self._LOCKED_FILE_SLEEP_TIME)\n\n self._path = None\n self._zipfile_path = None\n\n if self._path != self._zipfile_path and not file_renamed:\n raise IOError('Unable to close storage file.')", "def close(self, close_file=True):\n if self.__ref is not None:\n self.__lib.flush()\n if close_file == True:\n self.__ref.close()\n self.__ref = None\n self.__lib = None", "def close(self):\n self.ref_as_count_h5.close()\n self.alt_as_count_h5.close()\n self.read_count_h5.close()", "def __del__(self):\n\n if self._is_open:\n self.close()", "def close(self):\n\n try:\n self.test.close()\n except Exception as e:\n pass\n\n try:\n self.valid.close()\n except Exception as e:\n pass\n\n for f in self.train:\n try:\n f.close()\n except Exception as e:\n pass", "def Close(self):\n self._RaiseIfNotWritable()\n\n self._storage_file.Close()\n self._storage_file = None", "def release(self, path, fh, *args, **pargs):\n with(self.rwlock):\n # If we're closing a FLACCue file...\n if(path in self._open_subtracks):\n # Delete the file handle from the stored list.\n del self._open_subtracks[path]['Positions'][fh]\n # Close the OS reference to the file.\n return os.close(fh)", "def close():\n # self.consumer.close()\n # close file handler\n LOGGER.info(\"consumer is closed!!\")", "def close(self):\n for lrms in self.resources.itervalues():\n lrms.close()", "async def close(self):\n self.writer.close()\n await self.writer.wait_closed()\n self.reader = None\n self.writer = None\n self.ref = Ref()", "def close(self):\n if not self._open:\n return\n\n # Close out the flush thread, if necessary.\n self._wait_for_flush(interrupt=True)\n\n if self._shelf is not None:\n self._shelf.close()\n self._shelf = None\n\n if self._dir is not None:\n shutil.rmtree(self._dir)\n self._dir = None\n\n if self._cache is not None:\n self._cache.clear()\n self._cache = None\n\n if self._recency is not None:\n self._recency.clear()\n self._recency = None\n\n self._open = False", "def close(self):\n self.extractor.close()", "def close(self):\n self._stream.close()\n self._arch.close()", "def close(self):\n self.read1_batch = None\n self.read2_batch = None\n self.writer.close()", "def close(self):\n if hasattr(self, '_fd'):\n if self._fd is not None:\n os.close(self._fd)\n self._fd = None\n self._name = None", "def close_file(file):\n file.close()", "def close(self):\r\n if not self.closed:\r\n for result in self._decrefAsync():\r\n pass", "def close(self):\n self.is_open = False", "def unload(self):\n for f in self.logs.values():\n f.close()", "def finalize(self):\n self.ratings.close()\n self.users.close()\n self.movies.close()", "def _safe_close(self, fds):\n for fd in fds:\n try:\n os.close(fd)\n except OSError as err:\n if err.errno != errno.EBADF:\n raise\n # TODO(kota_): fd might be closed already, so if already\n # closed, OSError will be raised. we need more refactor to\n # keep clean the file discriptors.\n pass", "def __del__(self):\n self.file.close()", "def __del__(self):\n self.f.close()", "def Close(self):\n self._task_storage_reader.Close()\n self._task_storage_reader = None", "def close(self):\n self.handle.close()", "def _close_writers(self):\r\n with context.eager_mode():\r\n for writer in six.itervalues(self._writers):\r\n writer.close()\r\n self._writers.clear()" ]
[ "0.7826722", "0.77460366", "0.7503366", "0.72826236", "0.72478783", "0.7243321", "0.7204818", "0.7204593", "0.7146203", "0.7134031", "0.7112463", "0.71020526", "0.70459485", "0.7036263", "0.70164305", "0.7010969", "0.6980717", "0.6947164", "0.6919161", "0.68919945", "0.6887085", "0.68868184", "0.6877383", "0.6867741", "0.6864131", "0.6864131", "0.685468", "0.6841972", "0.6820519", "0.6820519", "0.6812216", "0.67919236", "0.6785009", "0.6785009", "0.6784231", "0.6756157", "0.6723754", "0.6718719", "0.67129797", "0.6705118", "0.66987896", "0.66987896", "0.66895354", "0.66576064", "0.66409343", "0.6634567", "0.6630885", "0.6630885", "0.6616791", "0.6607254", "0.6607219", "0.6594297", "0.6589888", "0.6580542", "0.6561231", "0.6556022", "0.65507317", "0.6540388", "0.65108746", "0.65092653", "0.6507087", "0.6498388", "0.6496773", "0.649256", "0.64918256", "0.64800406", "0.6459277", "0.6446404", "0.6443788", "0.64366686", "0.6435256", "0.64265054", "0.64265054", "0.6419693", "0.64104664", "0.6376057", "0.6364515", "0.6345504", "0.63419175", "0.6335406", "0.63319385", "0.632894", "0.63281256", "0.6322816", "0.6321847", "0.6315202", "0.63033295", "0.62970215", "0.6295853", "0.6272278", "0.625856", "0.62504214", "0.62483484", "0.6246879", "0.6243328", "0.6233657", "0.62234163", "0.621895", "0.6216547", "0.6210955" ]
0.6589813
53
Returns the number of centiseconds (1/100ths secs) for the given number of jiffies (a weird timing unit used the kernel).
def __calculate_time_cs(self, jiffies): return int((jiffies * 100.0) / self._jiffies_per_sec)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_time_ms(self, jiffies):\n\n return int((jiffies * 1000.0) / self._jiffies_per_sec)", "def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))", "def _nsec_to_usec_round(nsec):\n return (nsec + 500) // 10 ** 3", "def millis(): \n return int(round(monotonic.monotonic() * C.MILLISECONDS))", "def millis(): \r\n return int(round(monotonic.monotonic() * C.MILLISECONDS))", "def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),\n _load_time=time.time()):\n try:\n f=open(_proc_pid_stat,'r')\n l = f.readline().split(' ')\n f.close()\n return int(l[13])\n except:\n return int(100*(time.time()-_load_time))", "def elapsed_micros(start: int, /) -> int:", "def _probe_wait_time(self):\n r = self.probe_cycle_time / float(len(self.servers)) #self.probe_cycle_time=5\n r = max(.25, r) # Cap it at four per second\n return r", "def get_total_cpu_clock_cycles():\n try:\n with open(LINUX_STAT_LOCATION, 'r') as f:\n cpu_entries = f.readline().split(' ')\n except IOError:\n return None\n\n cpu_cycles = 0\n for entry in cpu_entries:\n try:\n cpu_cycles += int(entry)\n except ValueError:\n pass\n return cpu_cycles", "def _get_cpu_interval(self):\n self._polling_execute_frequency = int(self._plugin_conf[u'main'][u'polling_frequency'])\n\n if 5 <= self._polling_execute_frequency < 60:\n return cpmCPUTotalMonIntervalValue # replaces cpmCPUTotal5SecRev\n elif 60 <= self._polling_execute_frequency < 300:\n return cpmCPUTotal1minRev\n elif 300 <= self._polling_execute_frequency:\n return cpmCPUTotal5minRev\n else:\n return cpmCPUTotal1minRev", "def ticks_per_second(self):\n return self._ticks_per_second", "def millis(start_time):\n dt = datetime.now() - start_time\n ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n return ms", "def curTimeMs():\n\treturn int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)", "def getTime():\n\n return float(time.perf_counter()*1000)", "def tick(self):\n prev_last_tick = self.last_tick_\n self.last_tick_ = timeit.default_timer()\n latest_tick_period = self.last_tick_ - prev_last_tick\n return latest_tick_period", "def millis() -> int:", "def millis():\n return int(round(time() * 1000))", "def get_clock_divisor(self):\n return self.o.read_register(self.dev_id, CLOCK_DIVISOR)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def _unit_sec(self):\n return self.time_base / 60.0", "def _get_milleseconds(self):\n return int(round(time.time() * 1000))", "def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000", "def _unit_ms(self):\n return (self.time_base / 1000.0) / 60.0", "def __micros():\n return round(time.time() * 1000000)", "def _STEPS2TIME(step):\n return step/1000.", "def _current_epoch_secs():\n now = datetime.datetime.utcnow()\n epoch = datetime.datetime(1970, 1, 1)\n return (now - epoch).total_seconds()", "def n_elements(x, dist, var=None):\n n = dist/mdiff(x)\n if var == 'time':\n n = n/60\n return int(np.round(n))", "def np_dt_epoch_msec(value):\n return value.astype(long) / 1000", "def num_microseconds(self, td):\n return float(td.microseconds + 1000000 * (td.seconds + 86400 * td.days))", "def get_timebase(self,dt):\r\n\r\n 
if dt < 1E-9:\r\n dt = 1E-9\r\n\r\n if dt > 4E-9:\r\n n = int(dt*125E6 + 2)\r\n else:\r\n dt *= 1E9\r\n n = round(log(dt,2))\r\n return n", "def calculer_energie_cinetique_tnt(energie_cinetique):\n return energie_cinetique / 4184", "def unit_ms(self):\n return (self.time_base / 1000.0) / 60.0", "def unit_sec(self):\n return self.time_base / 60.0", "def sequenceTime_sec(cmds):\n cycles = sum(MemorySequence.cmdTime_cycles(c) for c in cmds)\n return cycles * 40e-9 # assume 25 MHz clock -> 40 ns per cycle", "def timefactor(self, period):\n\t\tdays = period.size() * self.daysinunit(period.unit)\n\t\treturn float(days)/self.daysinbase", "def transmission_time_us(self, num_bytes):\n bits_to_transmit = num_bytes * 8\n transmission_time_us = (bits_to_transmit / self.megabits_per_second)\n return transmission_time_us", "def cpu_times(percpu=False):\r\n if not percpu:\r\n return _get_sys_cpu_times()\r\n else:\r\n return _get_sys_per_cpu_times()", "def __timedelta_millis(td):\n return int(round(td.total_seconds(), 3) * 1000)", "def getNowMilliseconds():\n return (datetime.datetime.utcnow() - Common.epoch_).total_seconds() * 1000.0", "def cputime(conn):\n c = conn.cursor()\n r = c.execute(\"SELECT SUM(length) as total_bin_time FROM event WHERE bin_id not null\").fetchall()\n total_bin_time = r[0]['total_bin_time'];\n last_time = maxtime(conn)\n\n cputime = 0\n if total_bin_time and last_time:\n cpu_time = total_bin_time / last_time\n\n return {\n \"cpu_time\":cpu_time\n }", "def calculate_seconds_in_days(days):\n return int(days * 86400)", "def cpu_percent(interval=0.1, percpu=False):\r\n global _last_cpu_times\r\n global _last_per_cpu_times\r\n blocking = interval is not None and interval > 0.0\r\n\r\n def calculate(t1, t2):\r\n t1_all = sum(t1)\r\n t1_busy = t1_all - t1.idle\r\n\r\n t2_all = sum(t2)\r\n t2_busy = t2_all - t2.idle\r\n\r\n # this usually indicates a float precision issue\r\n if t2_busy <= t1_busy:\r\n return 0.0\r\n\r\n busy_delta = t2_busy - t1_busy\r\n all_delta = t2_all - t1_all\r\n busy_perc = (busy_delta / all_delta) * 100\r\n return round(busy_perc, 1)\r\n\r\n # system-wide usage\r\n if not percpu:\r\n if blocking:\r\n t1 = cpu_times()\r\n time.sleep(interval)\r\n else:\r\n t1 = _last_cpu_times\r\n _last_cpu_times = cpu_times()\r\n return calculate(t1, _last_cpu_times)\r\n # per-cpu usage\r\n else:\r\n ret = []\r\n if blocking:\r\n tot1 = cpu_times(percpu=True)\r\n time.sleep(interval)\r\n else:\r\n tot1 = _last_per_cpu_times\r\n _last_per_cpu_times = cpu_times(percpu=True)\r\n for t1, t2 in zip(tot1, _last_per_cpu_times):\r\n ret.append(calculate(t1, t2))\r\n return ret", "def cps(self):\n return self.datacounts / self.exptime", "def now():\n\n return rospy.Time.now().to_nsec()", "def get_millis(seconds):\n return seconds * 10 ** 3", "def microseconds_since_epoch(date_time, epoch=None):\n if not epoch:\n epoch = datetime.datetime.utcfromtimestamp(0)\n\n delta = date_time - epoch\n\n # 86400 is 24 * 60 * 60 e.g. 
total seconds in a day\n return delta.microseconds + (delta.seconds + delta.days * 86400) * 10**6", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps", "def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)", "def millisecond():\n return int(round(time.time() * 1000))", "def dt_epoch_msecs(value):\n return long(calendar.timegm(value.timetuple())) * 1000", "def EpochNano():\n return int(time.time() * 1000000000)", "def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()", "def timeScale(self) -> int:\n return int(1 / (1 - self.momentum))", "def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)", "def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)", "def measure_time(n: int, max_delay: int) -> float:\n start_time = time.time()\n asyncio.run(wait_n(n, max_delay))\n return (time.time() - start_time) / n", "def get_time_ms():\n return int(round(time.time() * 1000))", "def measure_time(n: int, max_delay: int) -> float:\n t0 = time.time()\n asyncio.run(wait_n(n, max_delay))\n t1 = time.time()\n total_time = t1 - t0\n return total_time / n", "def elapsed_millis(start: int, /) -> int:", "def unix_time_millisecond(date):\r\n return unix_time(date, float=True) * 1e3", "def _nowms():\n return int(time.time() * 1000)", "def _calc_time_precision(sampling_rate):\n\n # convert sampling rate to int\n sampling_rate = int(sampling_rate)\n\n # calculate the first remainder\n remainder = 1 % sampling_rate\n\n # list for holding all remainders that have been calculated\n seen_remainders = []\n\n # calculate all other remainders until the modulo operation yields either zero or the remainder has already appeared\n # before (this is when the periodicity starts)\n while (remainder != 0) and (remainder not in seen_remainders):\n # append the current remainder to the seen remainder list\n seen_remainders.append(remainder)\n\n # multiply the remainder by ten (this is basically the process of converting a fraction to a decimal number\n # using long division\n remainder = remainder * 10\n\n # calculate the next remainder\n remainder = remainder % sampling_rate\n\n return len(seen_remainders) + 1", "def get_cpu_clock_cycles_of_pid(pid):\n try:\n with open(LINUX_PROCESS_STAT_LOCATION % pid, 'r') as f:\n pid_entries = f.read().split(' ')\n except IOError:\n return None\n\n pid_cycles = 0\n if len(pid_entries) > 14:\n pid_cycles = int(pid_entries[13]) + int(pid_entries[14])\n return pid_cycles", "def part1() -> int:\n longest_sleeper = max(sleep_times, key=lambda g: len(sleep_times[g]))\n sleepiest_minute = max(\n sleep_times[longest_sleeper], key=sleep_times[longest_sleeper].count)\n\n return longest_sleeper * sleepiest_minute", "def _GetSecsUntilNextPass(self):\n op = util.CheckedOp('retrieving last GC pass time',\n self.engine.Get,\n KEY_LAST_PASS_TIME)\n\n if not op.success:\n return None\n\n last_compute = float(op.response_value) if op.response_value else 0\n return last_compute + self.gc_frequency - time.time()", "def hz2cents(pitchInHz, tonic=261.626):\n cents = 1200*np.log2(1.0*pitchInHz/tonic)\n return cents", "def time(self):\n return sum(self._interval) * .5", "def seconds_since_epoch(date_time, epoch=None):\n return microseconds_since_epoch(date_time) / 10.0**6", "def _calibrate_time(self):\n time_overhead = 0\n for i in range(1000):\n start = self._adjusted_time()\n end = self._adjusted_time()\n time_overhead += end - start\n return time_overhead / 
1000", "def curr_time_millis():\n return 1000 * timeit.default_timer()", "def millis(self):\n return self._micros // 1000", "def units_to_msec(units, resolution):\n time_ms = units * float(resolution) / 1000\n return time_ms", "def MillisToSec(self):\n self.Millis = [item / 1000 for item in self.Millis]\n return self.Millis", "def period_cntfrac_sqrt(num):\r\n r = limit = int(sqrt(num))\r\n if limit * limit == num:\r\n return 0\r\n k, period = 1, 0\r\n while k != 1 or period == 0:\r\n k = (num - r * r) / k\r\n r = ((limit + r) / k) * k - r\r\n period += 1\r\n return period", "def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg", "def tick(self):\n self.delta = self.clock.tick(50) / 1000.0", "def tick(self):\n self.delta = self.clock.tick(50) / 1000.0", "def get_total_seconds(td):\n return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6", "def get_cpu_clock(verbose=False):\n\n fn = '/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq'\n output = run(\"cat \" + fn, quiet=True)\n # This file apparently only exists if the kernel's power saving module is\n # configured a certain way. I have so far only seen it on cn10 and cn11.\n # It looks like the units are kHz.\n\n clock_in_GHz = None\n\n try:\n clock_in_kHz = int(output)\n clock_in_GHz = float(clock_in_kHz) / (10**6)\n return clock_in_GHz\n except ValueError:\n if verbose:\n print(\"Error: On host = {Host}, unable to get cpu clock in string\\n{Output}\"\n .format(Host=env.host, Output=output))\n\n # The cpuinfo_max_freq file approach didn't work, so get current clock\n # from /proc/cpuinfo\n output = run(\"cat /proc/cpuinfo | grep MHz | uniq\", quiet=True)\n\n regex = re.compile(\n \"\"\"\n .*cpu\\sMHz # any chars before \"cpu MHz\"\n \\s*:\\s* # any amount of whitespace, colon, any amount of whitespace\n (\\d*.?\\d*) # any digits, <= 1 period, any digits (i.e. any positive float)\n \\s* # any amount of whitespace\n \"\"\", re.VERBOSE)\n\n matches = regex.findall(output)\n\n if (len(matches) == 1):\n clock_in_GHz = float(matches[0]) / (10**3) # MHz to GHz\n else:\n print(\"Error: On host = {Host}, unable to determine cpu frequency in string\\n{Output}\"\n .format(Host = env.host, Output = output))\n\n if verbose:\n print(\"{Host:4} | CPU clock: {Clock:4.2f} GHz\".format(Host=env.host, Clock=clock_in_GHz))\n\n return clock_in_GHz", "def time_ms():\n return int(1000 * time.time())", "def clock_helper(total_seconds):\n seconds_in_minute = total_seconds % 60", "def num_ticks(self, start, end, desired_ticks=None):\n if self.resolution is None or self.resolution == 0.0:\n return 0\n else:\n return (end - start) / self.resolution", "def cpu_time(self):", "def __get_uptime_ms(self):\n\n if self._boot_time_ms is None:\n # We read /proc/uptime once to get the current boot time.\n uptime_file = None\n try:\n uptime_file = open(\"/proc/uptime\", \"r\")\n # The first number in the file is the number of seconds since\n # boot time. 
So, we just use that to calculate the milliseconds\n # past epoch.\n self._boot_time_ms = int(time.time()) * 1000 - int(\n float(uptime_file.readline().split()[0]) * 1000.0\n )\n finally:\n if uptime_file is not None:\n uptime_file.close()\n\n # Calculate the uptime by just taking current time and subtracting out\n # the boot time.\n return int(time.time()) * 1000 - self._boot_time_ms", "def calculate_days(time):\n return int(time / 86400)", "def __current_milli_time(self):\n\n return int(round(time.time() * 1000))", "def proc_start():\n\n fd = open(\"/proc/self/stat\")\n start_clk = int(fd.readline().split()[21])\n start_sec = start_clk // os.sysconf(\"SC_CLK_TCK\")\n fd.close()\n\n fd = open(\"/proc/stat\")\n boot_sec = None\n for line in fd:\n if line.startswith(\"btime\"):\n boot_sec = int(line.split()[1])\n assert boot_sec is not None\n fd.close()\n\n return boot_sec + start_sec", "def sys_up_time():\n\n with open('/proc/uptime', 'r') as f:\n uptime_seconds = float(f.readline().split()[0])\n return int(uptime_seconds)", "def clock_speed(self):\n return self._clock_speed", "def get_elapsed_seconds():\n\tutcnow = datetime.utcnow()\n\tmidnight_utc = datetime.combine(utcnow.date(), time(0))\n\tdelta = utcnow - midnight_utc\n\treturn delta.total_seconds()", "def current_milli_time(self):\n return int(round(time.time() * 1000))", "def calc_delta_c(c200):\n top = (200. / 3.) * c200**3.\n bottom = np.log(1. + c200) - (c200 / (1. + c200))\n return (top / bottom)", "def time_millis():\n\n return int(time.time() * 1000)", "def startup_time_delta(self):\n return int((time.time() - self.startup_timestamp) * 1000.0)", "def cumulcoolingtime(self, interval):\n return self._getcumultime(interval, self.cooling_times)" ]
[ "0.7653799", "0.652349", "0.6424848", "0.6171948", "0.6161103", "0.6105997", "0.5744971", "0.56402147", "0.5638903", "0.5606995", "0.5588372", "0.5540272", "0.5520585", "0.5486425", "0.54827213", "0.5427575", "0.5426606", "0.5411524", "0.54085886", "0.54085886", "0.54085886", "0.54085886", "0.54085886", "0.54085886", "0.53986", "0.5370979", "0.5370503", "0.5370475", "0.5364911", "0.536195", "0.53587836", "0.5351028", "0.53441715", "0.53379315", "0.53326714", "0.53294146", "0.53270894", "0.53163946", "0.53105134", "0.53103775", "0.53040475", "0.52996546", "0.52967894", "0.52888703", "0.52816117", "0.5278208", "0.52589846", "0.5255259", "0.5253289", "0.5236489", "0.5232979", "0.5219614", "0.52138215", "0.5203136", "0.519974", "0.51875097", "0.51853716", "0.5183313", "0.51727295", "0.51643234", "0.5153411", "0.5152877", "0.51493096", "0.5140954", "0.51248205", "0.5123222", "0.5122819", "0.51215124", "0.5120848", "0.5120689", "0.5120453", "0.5118943", "0.51183826", "0.5118254", "0.5117916", "0.5107305", "0.5105741", "0.5097183", "0.50926906", "0.50898635", "0.5087706", "0.5087706", "0.5078092", "0.50707245", "0.50701797", "0.5068228", "0.5066959", "0.5064344", "0.5061869", "0.50523967", "0.5033879", "0.5030043", "0.50286853", "0.5028655", "0.5025476", "0.5024317", "0.5019504", "0.50168395", "0.5015332", "0.50078374" ]
0.7872766
0